1 /*-
2 * Copyright (c) 2014 Andrew Turner
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28 #include "opt_acpi.h"
29 #include "opt_platform.h"
30 #include "opt_ddb.h"
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/buf.h>
38 #include <sys/bus.h>
39 #include <sys/cons.h>
40 #include <sys/cpu.h>
41 #include <sys/csan.h>
42 #include <sys/devmap.h>
43 #include <sys/efi.h>
44 #include <sys/exec.h>
45 #include <sys/imgact.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/limits.h>
50 #include <sys/linker.h>
51 #include <sys/msgbuf.h>
52 #include <sys/pcpu.h>
53 #include <sys/physmem.h>
54 #include <sys/proc.h>
55 #include <sys/ptrace.h>
56 #include <sys/reboot.h>
57 #include <sys/rwlock.h>
58 #include <sys/sched.h>
59 #include <sys/signalvar.h>
60 #include <sys/syscallsubr.h>
61 #include <sys/sysent.h>
62 #include <sys/sysproto.h>
63 #include <sys/ucontext.h>
64 #include <sys/vdso.h>
65 #include <sys/vmmeter.h>
66
67 #include <vm/vm.h>
68 #include <vm/vm_param.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_phys.h>
73 #include <vm/pmap.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_pager.h>
76
77 #include <machine/armreg.h>
78 #include <machine/cpu.h>
79 #include <machine/debug_monitor.h>
80 #include <machine/kdb.h>
81 #include <machine/machdep.h>
82 #include <machine/metadata.h>
83 #include <machine/md_var.h>
84 #include <machine/pcb.h>
85 #include <machine/reg.h>
86 #include <machine/undefined.h>
87 #include <machine/vmparam.h>
88
89 #ifdef VFP
90 #include <machine/vfp.h>
91 #endif
92
93 #ifdef DEV_ACPI
94 #include <contrib/dev/acpica/include/acpi.h>
95 #include <machine/acpica_machdep.h>
96 #endif
97
98 #ifdef FDT
99 #include <dev/fdt/fdt_common.h>
100 #include <dev/ofw/openfirm.h>
101 #endif
102
103 enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
104
105 struct pcpu __pcpu[MAXCPU];
106
107 #if defined(PERTHREAD_SSP)
108 /*
109 * The boot SSP canary. Will be replaced with a per-thread canary when
110 * scheduling has started.
111 */
112 uintptr_t boot_canary = 0x49a2d892bc05a0b1ul;
113 #endif
114
115 static struct trapframe proc0_tf;
116
117 int early_boot = 1;
118 int cold = 1;
119 static int boot_el;
120
121 struct kva_md_info kmi;
122
123 int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */
124 int has_pan;
125
126 /*
127 * Physical address of the EFI System Table. Stashed from the metadata hints
128 * passed into the kernel and used by the EFI code to call runtime services.
129 */
130 vm_paddr_t efi_systbl_phys;
131 static struct efi_map_header *efihdr;
132
133 /* pagezero_* implementations are provided in support.S */
134 void pagezero_simple(void *);
135 void pagezero_cache(void *);
136
137 /* pagezero_simple is default pagezero */
138 void (*pagezero)(void *p) = pagezero_simple;
139
140 int (*apei_nmi)(void);
141
142 #if defined(PERTHREAD_SSP_WARNING)
static void
print_ssp_warning(void *data __unused)
{
	printf("WARNING: Per-thread SSP is enabled but the compiler is too old to support it\n");
}
/* Emit the warning twice: at SI_SUB_COPYRIGHT (early) and SI_SUB_LAST (late). */
SYSINIT(ssp_warn, SI_SUB_COPYRIGHT, SI_ORDER_ANY, print_ssp_warning, NULL);
SYSINIT(ssp_warn2, SI_SUB_LAST, SI_ORDER_ANY, print_ssp_warning, NULL);
150 #endif
151
152 static void
pan_setup(void)153 pan_setup(void)
154 {
155 uint64_t id_aa64mfr1;
156
157 id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
158 if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
159 has_pan = 1;
160 }
161
/*
 * Enable PAN if the CPU supports it (see pan_setup()).  Clears the SPAN
 * bit in SCTLR_EL1 and then sets the PSTATE.PAN bit directly.
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
182
/*
 * Return true when the kernel was entered at EL2 (the hypervisor
 * exception level), as recorded from the boot parameters in initarm().
 */
bool
has_hyp(void)
{

	return (boot_el == 2);
}
189
/*
 * Late CPU startup: report real/available memory, initialize the
 * undefined-instruction handlers and CPU errata workarounds, and set up
 * the kernel submaps and buffer cache.  Runs at SI_SUB_CPU.
 */
static void
cpu_startup(void *dummy)
{
	vm_paddr_t size;
	int i;

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / 1024 / 1024);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		/* phys_avail holds [start, end) pairs, terminated by 0. */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[i],
			    (uintmax_t)phys_avail[i + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);

	undef_init();
	install_cpu_errata();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
223
/*
 * Perform the late ELF ifunc relocation pass.  Scheduled at SI_SUB_CPU
 * (SI_ORDER_ANY), i.e. after cpu_startup() above.
 */
static void
late_ifunc_resolve(void *dummy __unused)
{
	link_elf_late_ireloc();
}
SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
230
/*
 * Hook to wake an idle CPU.  No machine-specific action is taken here;
 * always reports that nothing was done.
 */
int
cpu_idle_wakeup(int cpu)
{
	return (0);
}
237
/*
 * Idle the current CPU.  Interrupts are disabled via spinlock_enter()
 * across the sched_runnable() check so a wakeup cannot race the WFI;
 * the DSB orders prior memory accesses before the wait.  When not
 * "busy" the idle clock is stopped around the wait.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
253
/*
 * Halt the CPU: disable interrupts and spin in WFI forever.  Never
 * returns.
 */
void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	while (1) {
		__asm __volatile("wfi");
	}
}
264
265 /*
266 * Flush the D-cache for non-DMA I/O so that the I-cache can
267 * be made coherent later.
268 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD — intentionally a no-op for now. */
}
275
276 /* Get current clock frequency for the given CPU ID. */
277 int
cpu_est_clockrate(int cpu_id,uint64_t * rate)278 cpu_est_clockrate(int cpu_id, uint64_t *rate)
279 {
280 struct pcpu *pc;
281
282 pc = pcpu_find(cpu_id);
283 if (pc == NULL || rate == NULL)
284 return (EINVAL);
285
286 if (pc->pc_clock == 0)
287 return (EOPNOTSUPP);
288
289 *rate = pc->pc_clock;
290 return (0);
291 }
292
/*
 * Machine-dependent per-CPU data initialization: set the ACPI id and
 * MPIDR to an invalid sentinel (0xffffffff).
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
	pcpu->pc_mpidr = 0xffffffff;
}
300
/*
 * Enter a spinlock section.  The outermost call disables interrupts,
 * saving the previous DAIF state in the thread, and enters a critical
 * section; nested calls only increment the per-thread count.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Interrupts must be off before the count is published. */
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
316
/*
 * Leave a spinlock section.  The outermost exit leaves the critical
 * section and restores the DAIF state saved by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	/* Read the saved state before dropping the count. */
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(daif);
	}
}
331
332 /*
333 * Construct a PCB from a trapframe. This is called from kdb_trap() where
334 * we want to start a backtrace from the function that caused us to enter
335 * the debugger. We have the context in the trapframe, but base the trace
336 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
337 * enough for a backtrace.
338 */
339 void
makectx(struct trapframe * tf,struct pcb * pcb)340 makectx(struct trapframe *tf, struct pcb *pcb)
341 {
342 int i;
343
344 for (i = 0; i < nitems(pcb->pcb_x); i++)
345 pcb->pcb_x[i] = tf->tf_x[i];
346
347 /* NB: pcb_lr is the PC, see PC_REGS() in db_machdep.h */
348 pcb->pcb_lr = tf->tf_elr;
349 pcb->pcb_sp = tf->tf_sp;
350 }
351
/*
 * Initialize proc0/thread0: link them together, hand thread0 its kernel
 * stack and PCB, and publish the PCB in the boot CPU's pcpu data.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
#if defined(PERTHREAD_SSP)
	thread0.td_md.md_canary = boot_canary;
#endif
	/* The PCB is placed at the top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
	/* No CPU holds thread0's VFP state yet. */
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	/*
	 * Unmask SError exceptions. They are used to signal a RAS failure,
	 * or other hardware error.
	 */
	serror_enable();
}
377
378 /*
379 * Get an address to be used to write to kernel data that may be mapped
380 * read-only, e.g. to patch kernel code.
381 */
/*
 * Find a writable alias for the kernel virtual address 'addr'.
 *
 * First probes the address itself with an AT S1E1W translation; if that
 * fails, looks the page up and retries through the DMAP alias of its
 * physical address.  Returns true with *out set to a writable address,
 * or false if no writable mapping could be found.
 */
bool
arm64_get_writable_addr(vm_offset_t addr, vm_offset_t *out)
{
	vm_paddr_t pa;

	/* Check if the page is writable */
	if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
		*out = addr;
		return (true);
	}

	/*
	 * Find the physical address of the given page.
	 */
	if (!pmap_klookup(addr, &pa)) {
		return (false);
	}

	/*
	 * If it is within the DMAP region and is writable use that.
	 */
	if (PHYS_IN_DMAP(pa)) {
		addr = PHYS_TO_DMAP(pa);
		if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
			*out = addr;
			return (true);
		}
	}

	return (false);
}
413
/*
 * Layout of a UEFI memory descriptor (cf. EFI_MEMORY_DESCRIPTOR in the
 * UEFI specification).  NOTE(review): appears unused in this file — the
 * EFI map walkers below operate on struct efi_md instead; confirm no
 * external users before removing.
 */
typedef struct {
	uint32_t	type;
	uint64_t	phys_start;
	uint64_t	virt_start;
	uint64_t	num_pages;
	uint64_t	attr;
} EFI_MEMORY_DESCRIPTOR;
421
422 typedef void (*efi_map_entry_cb)(struct efi_md *);
423
424 static void
foreach_efi_map_entry(struct efi_map_header * efihdr,efi_map_entry_cb cb)425 foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
426 {
427 struct efi_md *map, *p;
428 size_t efisz;
429 int ndesc, i;
430
431 /*
432 * Memory map data provided by UEFI via the GetMemoryMap
433 * Boot Services API.
434 */
435 efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
436 map = (struct efi_md *)((uint8_t *)efihdr + efisz);
437
438 if (efihdr->descriptor_size == 0)
439 return;
440 ndesc = efihdr->memory_size / efihdr->descriptor_size;
441
442 for (i = 0, p = map; i < ndesc; i++,
443 p = efi_next_descriptor(p, efihdr->descriptor_size)) {
444 cb(p);
445 }
446 }
447
448 static void
exclude_efi_map_entry(struct efi_md * p)449 exclude_efi_map_entry(struct efi_md *p)
450 {
451
452 switch (p->md_type) {
453 case EFI_MD_TYPE_CODE:
454 case EFI_MD_TYPE_DATA:
455 case EFI_MD_TYPE_BS_CODE:
456 case EFI_MD_TYPE_BS_DATA:
457 case EFI_MD_TYPE_FREE:
458 /*
459 * We're allowed to use any entry with these types.
460 */
461 break;
462 default:
463 physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
464 EXFLAG_NOALLOC);
465 }
466 }
467
/* Exclude every non-usable EFI map entry from physical memory. */
static void
exclude_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
}
474
/*
 * Register an EFI map entry as a hardware memory region if the kernel
 * is allowed to use it.
 */
static void
add_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_RT_DATA:
		/*
		 * Runtime data will be excluded after the DMAP
		 * region is created to stop it from being added
		 * to phys_avail.
		 */
		/* FALLTHROUGH */
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		physmem_hardware_region(p->md_phys,
		    p->md_pages * PAGE_SIZE);
		break;
	}
}
499
/* Register every usable EFI map entry as physical memory. */
static void
add_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, add_efi_map_entry);
}
506
507 static void
print_efi_map_entry(struct efi_md * p)508 print_efi_map_entry(struct efi_md *p)
509 {
510 const char *type;
511 static const char *types[] = {
512 "Reserved",
513 "LoaderCode",
514 "LoaderData",
515 "BootServicesCode",
516 "BootServicesData",
517 "RuntimeServicesCode",
518 "RuntimeServicesData",
519 "ConventionalMemory",
520 "UnusableMemory",
521 "ACPIReclaimMemory",
522 "ACPIMemoryNVS",
523 "MemoryMappedIO",
524 "MemoryMappedIOPortSpace",
525 "PalCode",
526 "PersistentMemory"
527 };
528
529 if (p->md_type < nitems(types))
530 type = types[p->md_type];
531 else
532 type = "<INVALID>";
533 printf("%23s %012lx %12p %08lx ", type, p->md_phys,
534 p->md_virt, p->md_pages);
535 if (p->md_attr & EFI_MD_ATTR_UC)
536 printf("UC ");
537 if (p->md_attr & EFI_MD_ATTR_WC)
538 printf("WC ");
539 if (p->md_attr & EFI_MD_ATTR_WT)
540 printf("WT ");
541 if (p->md_attr & EFI_MD_ATTR_WB)
542 printf("WB ");
543 if (p->md_attr & EFI_MD_ATTR_UCE)
544 printf("UCE ");
545 if (p->md_attr & EFI_MD_ATTR_WP)
546 printf("WP ");
547 if (p->md_attr & EFI_MD_ATTR_RP)
548 printf("RP ");
549 if (p->md_attr & EFI_MD_ATTR_XP)
550 printf("XP ");
551 if (p->md_attr & EFI_MD_ATTR_NV)
552 printf("NV ");
553 if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
554 printf("MORE_RELIABLE ");
555 if (p->md_attr & EFI_MD_ATTR_RO)
556 printf("RO ");
557 if (p->md_attr & EFI_MD_ATTR_RT)
558 printf("RUNTIME");
559 printf("\n");
560 }
561
/* Print a header line followed by every EFI memory map descriptor. */
static void
print_efi_map_entries(struct efi_map_header *efihdr)
{

	printf("%23s %12s %12s %8s %4s\n",
	    "Type", "Physical", "Virtual", "#Pages", "Attr");
	foreach_efi_map_entry(efihdr, print_efi_map_entry);
}
570
571 #ifdef FDT
/*
 * Locate the device tree blob — from loader metadata, or the statically
 * embedded copy when built with FDT_DTB_STATIC — hand it to the Open
 * Firmware layer, and parse kernel boot arguments from it.  Panics if a
 * blob was found but cannot be installed/initialized.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == 0)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
#ifndef TSLOG
		printf("ERROR loading DTB\n");
#endif
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	parse_fdt_bootargs();
}
602 #endif
603
604 static bool
bus_probe(void)605 bus_probe(void)
606 {
607 bool has_acpi, has_fdt;
608 char *order, *env;
609
610 has_acpi = has_fdt = false;
611
612 #ifdef FDT
613 has_fdt = (OF_peer(0) != 0);
614 #endif
615 #ifdef DEV_ACPI
616 has_acpi = (AcpiOsGetRootPointer() != 0);
617 #endif
618
619 env = kern_getenv("kern.cfg.order");
620 if (env != NULL) {
621 order = env;
622 while (order != NULL) {
623 if (has_acpi &&
624 strncmp(order, "acpi", 4) == 0 &&
625 (order[4] == ',' || order[4] == '\0')) {
626 arm64_bus_method = ARM64_BUS_ACPI;
627 break;
628 }
629 if (has_fdt &&
630 strncmp(order, "fdt", 3) == 0 &&
631 (order[3] == ',' || order[3] == '\0')) {
632 arm64_bus_method = ARM64_BUS_FDT;
633 break;
634 }
635 order = strchr(order, ',');
636 }
637 freeenv(env);
638
639 /* If we set the bus method it is valid */
640 if (arm64_bus_method != ARM64_BUS_NONE)
641 return (true);
642 }
643 /* If no order or an invalid order was set use the default */
644 if (arm64_bus_method == ARM64_BUS_NONE) {
645 if (has_fdt)
646 arm64_bus_method = ARM64_BUS_FDT;
647 else if (has_acpi)
648 arm64_bus_method = ARM64_BUS_ACPI;
649 }
650
651 /*
652 * If no option was set the default is valid, otherwise we are
653 * setting one to get cninit() working, then calling panic to tell
654 * the user about the invalid bus setup.
655 */
656 return (env == NULL);
657 }
658
/*
 * Detect the cache geometry and, when the "dc zva" instruction is
 * usable, record its block size and switch the pagezero implementation
 * to the cache-assisted variant.
 */
static void
cache_setup(void)
{
	int dczva_line_shift;
	uint32_t dczid_el0;

	identify_cache(READ_SPECIALREG(ctr_el0));

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/*
		 * DCZID_EL0.BS is log2 of the block size in words, so
		 * the size in bytes is sizeof(int) << BS.
		 */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
681
/*
 * Return the memory attribute to use when mapping physical address
 * 'pa', derived from the EFI memory map.  I/O regions map as DEVICE;
 * otherwise the strongest cacheability the descriptor advertises is
 * chosen (WB over WT over WC).  Addresses not covered by the map, or
 * covered by a descriptor with none of those attributes, default to
 * DEVICE; without an EFI map everything is treated as write-back.
 */
int
memory_mapping_mode(vm_paddr_t pa)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	if (efihdr == NULL)
		return (VM_MEMATTR_WRITE_BACK);

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return (VM_MEMATTR_WRITE_BACK);
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		/* Skip descriptors that do not cover pa. */
		if (pa < p->md_phys ||
		    pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
			continue;
		if (p->md_type == EFI_MD_TYPE_IOMEM ||
		    p->md_type == EFI_MD_TYPE_IOPORT)
			return (VM_MEMATTR_DEVICE);
		else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
		    p->md_type == EFI_MD_TYPE_RECLAIM)
			return (VM_MEMATTR_WRITE_BACK);
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			return (VM_MEMATTR_WRITE_THROUGH);
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			return (VM_MEMATTR_WRITE_COMBINING);
		break;
	}

	return (VM_MEMATTR_DEVICE);
}
723
/*
 * Machine-dependent kernel initialization, called early from locore
 * with the boot parameters assembled by the loader.  Sets up physical
 * memory from the EFI map or FDT, the boot CPU's pcpu data, the
 * bootstrap pmap, consoles, proc0 and the debugger hooks.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_fb *efifb;
	struct pcpu *pcpup;
	char *env;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
	phandle_t root;
	char dts_version[255];
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	bool valid;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	/* Remember the exception level we were booted at, see has_hyp(). */
	boot_el = abp->boot_el;

	/* Parse loader or FDT boot parameters. Determine last used address. */
	lastaddr = parse_boot_param(abp);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	identify_cpu(0);
	update_special_regs(0);

	link_elf_ireloc(kmdp);
	try_load_dtb(kmdp);

	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	/* Load the physical memory ranges */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		physmem_hardware_regions(mem_regions, mem_regions_sz);
	}
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
#endif

	/* Exclude the EFI framebuffer from our view of physical memory. */
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb != NULL)
		physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
		    EXFLAG_NOALLOC);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* locore.S sets sp_el0 to &thread0 so no need to set it here. */
	PCPU_SET(curthread, &thread0);
	PCPU_SET(midr, get_midr());

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
	/* Exclude entries needed in the DMAP region, but not phys_avail */
	if (efihdr != NULL)
		exclude_efi_map_entries(efihdr);
	physmem_init_kernel_globals();

	devmap_bootstrap(0, NULL);

	valid = bus_probe();

	cninit();
	set_ttbr0(abp->kern_ttbr0);
	cpu_tlb_flushID();

	/* Panic only after cninit() so the message is visible. */
	if (!valid)
		panic("Invalid bus configuration: %s",
		    kern_getenv("kern.cfg.order"));

	/*
	 * Dump the boot metadata. We have to wait for cninit() since console
	 * output is required. If it's grossly incorrect the kernel will never
	 * make it this far.
	 */
	if (getenv_is_true("debug.dump_modinfo_at_boot"))
		preload_dump();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
#ifdef KDB
	if ((boothowto & RB_KDB) != 0)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
	pan_enable();

	kcsan_cpu_init(0);

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

#ifdef FDT
	if (arm64_bus_method == ARM64_BUS_FDT) {
		root = OF_finddevice("/");
		if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
			if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
				printf("WARNING: DTB version is %s while kernel expects %s, "
				    "please update the DTB in the ESP\n",
				    dts_version,
				    LINUX_DTS_VERSION);
		} else {
			printf("WARNING: Cannot find freebsd,dts-version property, "
			    "cannot check DTB compliance\n");
		}
	}
#endif

	if (boothowto & RB_VERBOSE) {
		if (efihdr != NULL)
			print_efi_map_entries(efihdr);
		physmem_print_tables();
	}

	early_boot = 0;

	TSEXIT();
}
880
/*
 * Initialize the hardware debug support: unlock the debug registers by
 * clearing the OS lock, then set up the kernel debug monitor.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(oslar_el1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
893
894 #ifdef DDB
895 #include <ddb/ddb.h>
896
/*
 * DDB "show specialregs" command: dump the system and id registers of
 * the current CPU.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
971
DB_SHOW_COMMAND(vtop,db_show_vtop)972 DB_SHOW_COMMAND(vtop, db_show_vtop)
973 {
974 uint64_t phys;
975
976 if (have_addr) {
977 phys = arm64_address_translate_s1e1r(addr);
978 db_printf("EL1 physical address reg (read): 0x%016lx\n", phys);
979 phys = arm64_address_translate_s1e1w(addr);
980 db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
981 phys = arm64_address_translate_s1e0r(addr);
982 db_printf("EL0 physical address reg (read): 0x%016lx\n", phys);
983 phys = arm64_address_translate_s1e0w(addr);
984 db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
985 } else
986 db_printf("show vtop <virt_addr>\n");
987 }
988 #endif
989