1 /*-
2 * Copyright (c) 2014 Andrew Turner
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28 #include "opt_acpi.h"
29 #include "opt_platform.h"
30 #include "opt_ddb.h"
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/buf.h>
38 #include <sys/bus.h>
39 #include <sys/cons.h>
40 #include <sys/cpu.h>
41 #include <sys/csan.h>
42 #include <sys/devmap.h>
43 #include <sys/efi.h>
44 #include <sys/exec.h>
45 #include <sys/imgact.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/limits.h>
50 #include <sys/linker.h>
51 #include <sys/msgbuf.h>
52 #include <sys/pcpu.h>
53 #include <sys/physmem.h>
54 #include <sys/proc.h>
55 #include <sys/ptrace.h>
56 #include <sys/reboot.h>
57 #include <sys/rwlock.h>
58 #include <sys/sched.h>
59 #include <sys/signalvar.h>
60 #include <sys/syscallsubr.h>
61 #include <sys/sysent.h>
62 #include <sys/sysproto.h>
63 #include <sys/ucontext.h>
64 #include <sys/vdso.h>
65 #include <sys/vmmeter.h>
66
67 #include <vm/vm.h>
68 #include <vm/vm_param.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_phys.h>
73 #include <vm/pmap.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_pager.h>
76
77 #include <machine/armreg.h>
78 #include <machine/cpu.h>
79 #include <machine/debug_monitor.h>
80 #include <machine/kdb.h>
81 #include <machine/machdep.h>
82 #include <machine/metadata.h>
83 #include <machine/md_var.h>
84 #include <machine/pcb.h>
85 #include <machine/reg.h>
86 #include <machine/undefined.h>
87 #include <machine/vmparam.h>
88
89 #ifdef VFP
90 #include <machine/vfp.h>
91 #endif
92
93 #ifdef DEV_ACPI
94 #include <contrib/dev/acpica/include/acpi.h>
95 #include <machine/acpica_machdep.h>
96 #endif
97
98 #ifdef FDT
99 #include <dev/fdt/fdt_common.h>
100 #include <dev/ofw/openfirm.h>
101 #endif
102
/* Local helpers for saving/restoring FPU state into an mcontext. */
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

/* How devices are enumerated (FDT or ACPI); chosen in bus_probe(). */
enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

/* Per-CPU data for every possible CPU. */
struct pcpu __pcpu[MAXCPU];

/* Trapframe for thread0; installed in init_proc0(). */
static struct trapframe proc0_tf;

int early_boot = 1;	/* Nonzero during early boot. */
int cold = 1;		/* Nonzero until the scheduler is running. */
static int boot_el;	/* Exception level we booted at; see has_hyp(). */

struct kva_md_info kmi;

int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
int has_pan;		/* Nonzero when the CPU implements PAN; see pan_setup(). */

/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl_phys;
static struct efi_map_header *efihdr;	/* UEFI memory map header from loader. */

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;

/* NMI handler hook; presumably installed by ACPI APEI code — not set here. */
int (*apei_nmi)(void);
136
137 static void
pan_setup(void)138 pan_setup(void)
139 {
140 uint64_t id_aa64mfr1;
141
142 id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
143 if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
144 has_pan = 1;
145 }
146
/*
 * Turn on PAN for the current CPU. Clears SCTLR_EL1.SPAN first so PAN is
 * also set automatically on exception entry (per the Arm ARM, SPAN=0 means
 * PSTATE.PAN is set when taking an exception to EL1 — confirm against the
 * architecture revision in use).
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
167
168 bool
has_hyp(void)169 has_hyp(void)
170 {
171
172 return (boot_el == 2);
173 }
174
/*
 * Late CPU startup: report memory statistics, initialize the undefined-
 * instruction handler and CPU errata workarounds, then bring up the kernel
 * submaps and buffer cache. Runs once via SYSINIT at SI_SUB_CPU.
 */
static void
cpu_startup(void *dummy)
{
	vm_paddr_t size;
	int i;

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / 1024 / 1024);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[i],
			    (uintmax_t)phys_avail[i + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);

	/* Register the default undefined-instruction handlers. */
	undef_init();
	/* Apply CPU-specific errata workarounds for this core. */
	install_cpu_errata();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
208
/*
 * Re-resolve kernel ifunc relocations once CPU features are fully known;
 * runs after cpu_startup() (same SYSINIT level, last order).
 */
static void
late_ifunc_resolve(void *dummy __unused)
{
	link_elf_late_ireloc();
}
SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
215
/*
 * Wake a CPU from the idle loop. WFI-based idling needs no explicit kick
 * here, so always report that no wakeup action was performed.
 */
int
cpu_idle_wakeup(int cpu)
{
	return (0);
}
222
/*
 * Copy the thread's trapframe into a struct reg for ptrace(2)/core dumps.
 * Always returns 0.
 */
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

#ifdef COMPAT_FREEBSD32
	/*
	 * We may be called here for a 32bits process, if we're using a
	 * 64bits debugger. If so, put PC and SPSR where it expects it.
	 */
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/* AArch32 PC is r15 and CPSR is reported in x[16]. */
		regs->x[15] = frame->tf_elr;
		regs->x[16] = frame->tf_spsr;
	}
#endif
	return (0);
}
248
249 int
set_regs(struct thread * td,struct reg * regs)250 set_regs(struct thread *td, struct reg *regs)
251 {
252 struct trapframe *frame;
253
254 frame = td->td_frame;
255 frame->tf_sp = regs->sp;
256 frame->tf_lr = regs->lr;
257 frame->tf_elr = regs->elr;
258 frame->tf_spsr &= ~PSR_FLAGS;
259 frame->tf_spsr |= regs->spsr & PSR_FLAGS;
260
261 memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
262
263 #ifdef COMPAT_FREEBSD32
264 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
265 /*
266 * We may be called for a 32bits process if we're using
267 * a 64bits debugger. If so, get PC and SPSR from where
268 * it put it.
269 */
270 frame->tf_elr = regs->x[15];
271 frame->tf_spsr = regs->x[16] & PSR_FLAGS;
272 }
273 #endif
274 return (0);
275 }
276
/*
 * Copy the thread's VFP/SIMD state into a struct fpreg for ptrace(2).
 * If the thread never started using the FPU the output is zeroed.
 * Always returns 0.
 */
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);

		KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
		    ("Called fill_fpregs while the kernel is using the VFP"));
		memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
		    sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
		regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));
	return (0);
}
303
/*
 * Install a debugger-supplied struct fpreg into the thread's saved VFP
 * state. A no-op (still returning 0) on kernels built without VFP.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}
319
/*
 * Report the hardware debug (breakpoint/watchpoint) registers for ptrace(2).
 * The counts and debug architecture version come from ID_AA64DFR0_EL1; the
 * register contents come from the per-thread debug monitor state, and are
 * reported only if the monitor is enabled for this thread. Always returns 0.
 */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	int i;
	uint8_t debug_ver, nbkpts, nwtpts;

	memset(regs, 0, sizeof(*regs));

	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
	    &debug_ver);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
	    &nbkpts);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_WRPs_SHIFT,
	    &nwtpts);

	/*
	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
	 * allows the hardware to provide 2-16 breakpoints so this won't
	 * overflow an 8 bit value. The same applies to the WRPs field.
	 */
	nbkpts++;
	nwtpts++;

	regs->db_debug_ver = debug_ver;
	regs->db_nbkpts = nbkpts;
	regs->db_nwtpts = nwtpts;

	monitor = &td->td_pcb->pcb_dbg_regs;
	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
		for (i = 0; i < nbkpts; i++) {
			regs->db_breakregs[i].dbr_addr = monitor->dbg_bvr[i];
			regs->db_breakregs[i].dbr_ctrl = monitor->dbg_bcr[i];
		}
		for (i = 0; i < nwtpts; i++) {
			regs->db_watchregs[i].dbw_addr = monitor->dbg_wvr[i];
			regs->db_watchregs[i].dbw_ctrl = monitor->dbg_wcr[i];
		}
	}

	return (0);
}
362
/*
 * Validate and install debugger-supplied hardware breakpoints and
 * watchpoints into the per-thread debug monitor state. All addresses must
 * be userspace addresses and only EL0-targeted, unlinked break/watchpoints
 * are accepted; returns EINVAL on any invalid entry, 0 on success.
 */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t addr;
	uint32_t ctrl;
	int count;
	int i;

	monitor = &td->td_pcb->pcb_dbg_regs;
	count = 0;
	monitor->dbg_enable_count = 0;

	for (i = 0; i < DBG_BRP_MAX; i++) {
		addr = regs->db_breakregs[i].dbr_addr;
		ctrl = regs->db_breakregs[i].dbr_ctrl;

		/* Don't let the user set a breakpoint on a kernel address. */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * The lowest 2 bits are ignored, so record the effective
		 * address.
		 */
		addr = rounddown2(addr, 4);

		/*
		 * Some control fields are ignored, and other bits reserved.
		 * Only unlinked, address-matching breakpoints are supported.
		 *
		 * XXX: fields that appear unvalidated, such as BAS, have
		 * constrained undefined behaviour. If the user mis-programs
		 * these, there is no risk to the system.
		 */
		ctrl &= DBG_BCR_EN | DBG_BCR_PMC | DBG_BCR_BAS;
		if ((ctrl & DBG_BCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBG_BCR_PMC) != DBG_BCR_PMC_EL0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}

		monitor->dbg_bvr[i] = addr;
		monitor->dbg_bcr[i] = ctrl;
	}

	for (i = 0; i < DBG_WRP_MAX; i++) {
		addr = regs->db_watchregs[i].dbw_addr;
		ctrl = regs->db_watchregs[i].dbw_ctrl;

		/* Don't let the user set a watchpoint on a kernel address. */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * Some control fields are ignored, and other bits reserved.
		 * Only unlinked watchpoints are supported.
		 */
		ctrl &= DBG_WCR_EN | DBG_WCR_PAC | DBG_WCR_LSC | DBG_WCR_BAS |
		    DBG_WCR_MASK;

		if ((ctrl & DBG_WCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBG_WCR_PAC) != DBG_WCR_PAC_EL0)
				return (EINVAL);

			/* Must set at least one of the load/store bits. */
			if ((ctrl & DBG_WCR_LSC) == 0)
				return (EINVAL);

			/*
			 * When specifying the address range with BAS, the MASK
			 * field must be zero.
			 */
			if ((ctrl & DBG_WCR_BAS) != DBG_WCR_BAS_MASK &&
			    (ctrl & DBG_WCR_MASK) != 0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}
		monitor->dbg_wvr[i] = addr;
		monitor->dbg_wcr[i] = ctrl;
	}

	/* Mark the monitor enabled if any break/watchpoint is active. */
	if (monitor->dbg_enable_count > 0)
		monitor->dbg_flags |= DBGMON_ENABLED;

	return (0);
}
454
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit (AArch32) variants of the register accessors, used for ptrace(2)
 * on COMPAT_FREEBSD32 processes. AArch32 r0-r12 live in the low trapframe
 * x registers; SP, LR and PC map to r13, r14 and ELR respectively.
 */
int
fill_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		regs->r[i] = tf->tf_x[i];
	/* For arm32, SP is r13 and LR is r14 */
	regs->r_sp = tf->tf_x[13];
	regs->r_lr = tf->tf_x[14];
	regs->r_pc = tf->tf_elr;
	regs->r_cpsr = tf->tf_spsr;

	return (0);
}

int
set_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		tf->tf_x[i] = regs->r[i];
	/* For arm 32, SP is r13 and LR is r14 */
	tf->tf_x[13] = regs->r_sp;
	tf->tf_x[14] = regs->r_lr;
	tf->tf_elr = regs->r_pc;
	tf->tf_spsr = regs->r_cpsr;

	return (0);
}

/* XXX fill/set dbregs/fpregs are stubbed on 32-bit arm. */
int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{
	/* Stub: report all-zero FP registers. */
	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{
	/* Stub: silently accept and discard. */
	return (0);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{
	/* Stub: report all-zero debug registers. */
	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{
	/* Stub: silently accept and discard. */
	return (0);
}
#endif
523
/* Set the thread's program counter (ELR) for PT_CONTINUE et al. */
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_elr = addr;
	return (0);
}
531
/*
 * Arm single-step for PT_STEP: set the SPSR software-step bit so the next
 * instruction traps, and flag the pcb so the trap handler knows why.
 */
int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_spsr |= PSR_SS;
	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
	return (0);
}
540
/* Undo ptrace_single_step(): clear the SPSR step bit and the pcb flag. */
int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_spsr &= ~PSR_SS;
	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
	return (0);
}
549
/*
 * Reset the thread's register state for a fresh execve(2): zero the
 * trapframe, pass the ps_strings/stack pointer in x0, align the stack,
 * and start execution at the image entry point.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_x[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);
568
/*
 * Fill an mcontext from the thread's trapframe (for signal delivery and
 * getcontext(2)). With GET_MC_CLEAR_RET the syscall return value (x0) is
 * zeroed and the carry flag — used to report syscall error — is cleared.
 * Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	/* x0/spsr were handled above; copy the remaining registers. */
	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;
	get_fpcontext(td, mcp);

	return (0);
}
592
/*
 * Install an mcontext into the thread's trapframe (sigreturn(2)/
 * setcontext(2)). Rejects with EINVAL any SPSR that is not EL0t AArch64
 * or that tries to change the DAIF interrupt-mask bits.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	uint32_t spsr;

	spsr = mcp->mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & PSR_AARCH32) != 0 ||
	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
	set_fpcontext(td, mcp);

	return (0);
}
615
616 static void
get_fpcontext(struct thread * td,mcontext_t * mcp)617 get_fpcontext(struct thread *td, mcontext_t *mcp)
618 {
619 #ifdef VFP
620 struct pcb *curpcb;
621
622 critical_enter();
623
624 curpcb = curthread->td_pcb;
625
626 if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
627 /*
628 * If we have just been running VFP instructions we will
629 * need to save the state to memcpy it below.
630 */
631 vfp_save_state(td, curpcb);
632
633 KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
634 ("Called get_fpcontext while the kernel is using the VFP"));
635 KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
636 ("Non-userspace FPU flags set in get_fpcontext"));
637 memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
638 sizeof(mcp->mc_fpregs));
639 mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
640 mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
641 mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
642 mcp->mc_flags |= _MC_FP_VALID;
643 }
644
645 critical_exit();
646 #endif
647 }
648
649 static void
set_fpcontext(struct thread * td,mcontext_t * mcp)650 set_fpcontext(struct thread *td, mcontext_t *mcp)
651 {
652 #ifdef VFP
653 struct pcb *curpcb;
654
655 critical_enter();
656
657 if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
658 curpcb = curthread->td_pcb;
659
660 /*
661 * Discard any vfp state for the current thread, we
662 * are about to override it.
663 */
664 vfp_discard(td);
665
666 KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
667 ("Called set_fpcontext while the kernel is using the VFP"));
668 memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
669 sizeof(mcp->mc_fpregs));
670 curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
671 curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
672 curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
673 }
674
675 critical_exit();
676 #endif
677 }
678
/*
 * Idle the CPU: wait for an interrupt (WFI) when nothing is runnable.
 * The dsb orders prior memory accesses before the wait; the spinlock
 * section keeps interrupts from being serviced between the runnable
 * check and the wfi.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
694
/*
 * Final halt path. If the platform power-off/reset did not take effect,
 * disable interrupts and spin in the lowest-power wait state forever.
 */
void
cpu_halt(void)
{
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
705
706 /*
707 * Flush the D-cache for non-DMA I/O so that the I-cache can
708 * be made coherent later.
709 */
710 void
cpu_flush_dcache(void * ptr,size_t len)711 cpu_flush_dcache(void *ptr, size_t len)
712 {
713
714 /* ARM64TODO TBD */
715 }
716
717 /* Get current clock frequency for the given CPU ID. */
718 int
cpu_est_clockrate(int cpu_id,uint64_t * rate)719 cpu_est_clockrate(int cpu_id, uint64_t *rate)
720 {
721 struct pcpu *pc;
722
723 pc = pcpu_find(cpu_id);
724 if (pc == NULL || rate == NULL)
725 return (EINVAL);
726
727 if (pc->pc_clock == 0)
728 return (EOPNOTSUPP);
729
730 *rate = pc->pc_clock;
731 return (0);
732 }
733
/*
 * MD per-CPU initialization: mark the ACPI ID and MPIDR as unknown until
 * CPU enumeration fills them in.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
	pcpu->pc_mpidr = 0xffffffff;
}
741
/*
 * Enter a spinlock section: on first entry disable interrupts, saving the
 * DAIF state for spinlock_exit() to restore; nested entries only bump the
 * count. The DAIF save must happen before the count is published.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
757
/*
 * Leave a spinlock section; the outermost exit restores the DAIF interrupt
 * state saved by the matching spinlock_enter(). Note the saved DAIF is read
 * before the count is decremented.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(daif);
	}
}
772
773 #ifndef _SYS_SYSPROTO_H_
774 struct sigreturn_args {
775 ucontext_t *ucp;
776 };
777 #endif
778
779 int
sys_sigreturn(struct thread * td,struct sigreturn_args * uap)780 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
781 {
782 ucontext_t uc;
783 int error;
784
785 if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
786 return (EFAULT);
787
788 error = set_mcontext(td, &uc.uc_mcontext);
789 if (error != 0)
790 return (error);
791
792 /* Restore signal mask. */
793 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
794
795 return (EJUSTRETURN);
796 }
797
798 /*
799 * Construct a PCB from a trapframe. This is called from kdb_trap() where
800 * we want to start a backtrace from the function that caused us to enter
801 * the debugger. We have the context in the trapframe, but base the trace
802 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
803 * enough for a backtrace.
804 */
805 void
makectx(struct trapframe * tf,struct pcb * pcb)806 makectx(struct trapframe *tf, struct pcb *pcb)
807 {
808 int i;
809
810 for (i = 0; i < nitems(pcb->pcb_x); i++)
811 pcb->pcb_x[i] = tf->tf_x[i];
812
813 /* NB: pcb_lr is the PC, see PC_REGS() in db_machdep.h */
814 pcb->pcb_lr = tf->tf_elr;
815 pcb->pcb_sp = tf->tf_sp;
816 }
817
/*
 * Deliver a signal to the current thread: build a sigframe (saved context
 * plus siginfo) on the user stack — the alternate stack if requested and
 * not already on it — copy it out, and redirect the trapframe to the
 * handler with the signal trampoline as the return address. Called with
 * the proc lock and ps_mtx held; both are dropped around the copyout and
 * reacquired before returning. A process whose stack cannot be written is
 * killed with SIGILL.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	/* Drop the locks across the sleepable copyout. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Handler arguments: signo, siginfo pointer, ucontext pointer. */
	tf->tf_x[0]= sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	sysent = p->p_sysent;
	/* Return through the signal trampoline (sigcode). */
	if (sysent->sv_sigcode_base != 0)
		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
897
/*
 * Initialize proc0/thread0: link them, place the PCB at the top of the
 * given kernel stack, and install the static trapframe. Called once
 * during early boot.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	/* The PCB lives at the very top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
	/* UINT_MAX: FPU state not resident on any CPU yet. */
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
914
/* On-disk layout of one UEFI memory descriptor (see the UEFI spec). */
typedef struct {
	uint32_t type;		/* EFI memory type */
	uint64_t phys_start;	/* physical start address */
	uint64_t virt_start;	/* virtual start address */
	uint64_t num_pages;	/* length in 4 KiB EFI pages */
	uint64_t attr;		/* EFI_MD_ATTR_* capability bits */
} EFI_MEMORY_DESCRIPTOR;

/* Callback invoked for each descriptor by foreach_efi_map_entry(). */
typedef void (*efi_map_entry_cb)(struct efi_md *);
924
925 static void
foreach_efi_map_entry(struct efi_map_header * efihdr,efi_map_entry_cb cb)926 foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
927 {
928 struct efi_md *map, *p;
929 size_t efisz;
930 int ndesc, i;
931
932 /*
933 * Memory map data provided by UEFI via the GetMemoryMap
934 * Boot Services API.
935 */
936 efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
937 map = (struct efi_md *)((uint8_t *)efihdr + efisz);
938
939 if (efihdr->descriptor_size == 0)
940 return;
941 ndesc = efihdr->memory_size / efihdr->descriptor_size;
942
943 for (i = 0, p = map; i < ndesc; i++,
944 p = efi_next_descriptor(p, efihdr->descriptor_size)) {
945 cb(p);
946 }
947 }
948
/*
 * Exclude from the physical allocator any EFI map entry whose type the
 * kernel may not reuse (runtime services, MMIO, ACPI tables, ...).
 */
static void
exclude_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		break;
	default:
		physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
		    EXFLAG_NOALLOC);
	}
}
968
/* Apply exclude_efi_map_entry() to every descriptor in the EFI map. */
static void
exclude_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
}
975
/*
 * Register usable EFI map entries as hardware memory regions so they can
 * become part of phys_avail.
 */
static void
add_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_RT_DATA:
		/*
		 * Runtime data will be excluded after the DMAP
		 * region is created to stop it from being added
		 * to phys_avail.
		 */
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		physmem_hardware_region(p->md_phys,
		    p->md_pages * PAGE_SIZE);
		break;
	}
}
1000
/* Apply add_efi_map_entry() to every descriptor in the EFI map. */
static void
add_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, add_efi_map_entry);
}
1007
1008 static void
print_efi_map_entry(struct efi_md * p)1009 print_efi_map_entry(struct efi_md *p)
1010 {
1011 const char *type;
1012 static const char *types[] = {
1013 "Reserved",
1014 "LoaderCode",
1015 "LoaderData",
1016 "BootServicesCode",
1017 "BootServicesData",
1018 "RuntimeServicesCode",
1019 "RuntimeServicesData",
1020 "ConventionalMemory",
1021 "UnusableMemory",
1022 "ACPIReclaimMemory",
1023 "ACPIMemoryNVS",
1024 "MemoryMappedIO",
1025 "MemoryMappedIOPortSpace",
1026 "PalCode",
1027 "PersistentMemory"
1028 };
1029
1030 if (p->md_type < nitems(types))
1031 type = types[p->md_type];
1032 else
1033 type = "<INVALID>";
1034 printf("%23s %012lx %12p %08lx ", type, p->md_phys,
1035 p->md_virt, p->md_pages);
1036 if (p->md_attr & EFI_MD_ATTR_UC)
1037 printf("UC ");
1038 if (p->md_attr & EFI_MD_ATTR_WC)
1039 printf("WC ");
1040 if (p->md_attr & EFI_MD_ATTR_WT)
1041 printf("WT ");
1042 if (p->md_attr & EFI_MD_ATTR_WB)
1043 printf("WB ");
1044 if (p->md_attr & EFI_MD_ATTR_UCE)
1045 printf("UCE ");
1046 if (p->md_attr & EFI_MD_ATTR_WP)
1047 printf("WP ");
1048 if (p->md_attr & EFI_MD_ATTR_RP)
1049 printf("RP ");
1050 if (p->md_attr & EFI_MD_ATTR_XP)
1051 printf("XP ");
1052 if (p->md_attr & EFI_MD_ATTR_NV)
1053 printf("NV ");
1054 if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
1055 printf("MORE_RELIABLE ");
1056 if (p->md_attr & EFI_MD_ATTR_RO)
1057 printf("RO ");
1058 if (p->md_attr & EFI_MD_ATTR_RT)
1059 printf("RUNTIME");
1060 printf("\n");
1061 }
1062
/* Print the EFI memory map: a column header, then one line per entry. */
static void
print_efi_map_entries(struct efi_map_header *efihdr)
{

	printf("%23s %12s %12s %8s %4s\n",
	    "Type", "Physical", "Virtual", "#Pages", "Attr");
	foreach_efi_map_entry(efihdr, print_efi_map_entry);
}
1071
#ifdef FDT
/*
 * Locate the device tree blob — from loader metadata, or the statically
 * compiled-in copy when FDT_DTB_STATIC — and initialize the OpenFirmware
 * FDT layer with it. Panics if a DTB is found but cannot be installed.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == 0)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	parse_fdt_bootargs();
}
#endif
1102
/*
 * Decide whether the kernel enumerates devices via FDT or ACPI. The
 * kern.cfg.order environment variable ("acpi", "fdt", or a comma-separated
 * preference list) is honoured when the requested bus is available;
 * otherwise FDT is preferred over ACPI. Returns false when the user asked
 * for an order that could not be satisfied (the caller is expected to
 * report this after the console is up).
 */
static bool
bus_probe(void)
{
	bool has_acpi, has_fdt;
	char *order, *env;

	has_acpi = has_fdt = false;

#ifdef FDT
	/* A non-empty OFW tree means a DTB was installed. */
	has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
	/* A root pointer means ACPI tables were found. */
	has_acpi = (AcpiOsGetRootPointer() != 0);
#endif

	env = kern_getenv("kern.cfg.order");
	if (env != NULL) {
		order = env;
		while (order != NULL) {
			if (has_acpi &&
			    strncmp(order, "acpi", 4) == 0 &&
			    (order[4] == ',' || order[4] == '\0')) {
				arm64_bus_method = ARM64_BUS_ACPI;
				break;
			}
			if (has_fdt &&
			    strncmp(order, "fdt", 3) == 0 &&
			    (order[3] == ',' || order[3] == '\0')) {
				arm64_bus_method = ARM64_BUS_FDT;
				break;
			}
			/* Advance to the next comma-separated token. */
			order = strchr(order, ',');
		}
		freeenv(env);

		/* If we set the bus method it is valid */
		if (arm64_bus_method != ARM64_BUS_NONE)
			return (true);
	}
	/* If no order or an invalid order was set use the default */
	if (arm64_bus_method == ARM64_BUS_NONE) {
		if (has_fdt)
			arm64_bus_method = ARM64_BUS_FDT;
		else if (has_acpi)
			arm64_bus_method = ARM64_BUS_ACPI;
	}

	/*
	 * If no option was set the default is valid, otherwise we are
	 * setting one to get cninit() working, then calling panic to tell
	 * the user about the invalid bus setup.
	 */
	return (env == NULL);
}
1157
/*
 * Discover cache geometry and configure the "dc zva" zeroing fast path:
 * if DC ZVA is permitted, record its block size and switch pagezero to
 * the cache-assisted implementation.
 */
static void
cache_setup(void)
{
	int dczva_line_shift;
	uint32_t dczid_el0;

	identify_cache(READ_SPECIALREG(ctr_el0));

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* DCZID_EL0.BS encodes the block size in 4-byte words. */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
1180
/*
 * Find the UEFI memory-map descriptor covering physical address "pa"
 * and translate its type and attributes into the VM memory attribute
 * to use when mapping it.  Falls back to write-back when no EFI map
 * was provided by the loader, and to device memory when the address
 * is not described by the map or carries no cacheability attribute.
 */
int
memory_mapping_mode(vm_paddr_t pa)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	/* No EFI map (e.g. FDT-only boot): assume normal cacheable RAM. */
	if (efihdr == NULL)
		return (VM_MEMATTR_WRITE_BACK);

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	/* Descriptors start after the header, rounded up to 16 bytes. */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* Guard against a malformed header before dividing by the size. */
	if (efihdr->descriptor_size == 0)
		return (VM_MEMATTR_WRITE_BACK);
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		/* Skip descriptors whose range does not contain pa. */
		if (pa < p->md_phys ||
		    pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
			continue;
		/* I/O regions are always device memory. */
		if (p->md_type == EFI_MD_TYPE_IOMEM ||
		    p->md_type == EFI_MD_TYPE_IOPORT)
			return (VM_MEMATTR_DEVICE);
		/* Prefer write-back; RECLAIM regions are treated as such. */
		else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
		    p->md_type == EFI_MD_TYPE_RECLAIM)
			return (VM_MEMATTR_WRITE_BACK);
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			return (VM_MEMATTR_WRITE_THROUGH);
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			return (VM_MEMATTR_WRITE_COMBINING);
		/* Matched a descriptor but no usable attribute: stop. */
		break;
	}

	/* Unknown or attribute-less memory: map it as device memory. */
	return (VM_MEMATTR_DEVICE);
}
1222
/*
 * Machine-dependent early kernel initialisation, entered from the
 * assembly startup code with bootstrap page tables active.  Parses
 * loader/FDT metadata, registers physical memory, sets up per-CPU
 * data, bootstraps pmap and the console, and prepares the machine
 * for mi_startup().  Statement order here is load-bearing.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_fb *efifb;
	struct pcpu *pcpup;
	char *env;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	bool valid;

	/* Record the exception level we were entered at. */
	boot_el = abp->boot_el;

	/* Parse loader or FDT boot parameters. Determine last used address. */
	lastaddr = parse_boot_param(abp);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	identify_cpu(0);
	update_special_regs(0);

	/* Resolve ifunc relocations before anything calls through them. */
	link_elf_ireloc(kmdp);
	try_load_dtb(kmdp);

	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	/* Load the physical memory ranges */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		physmem_hardware_regions(mem_regions, mem_regions_sz);
	}
	/* Keep DT-reserved ranges out of the allocator and kernel dumps. */
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
#endif

	/* Exclude the EFI framebuffer from our view of physical memory. */
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb != NULL)
		physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
		    EXFLAG_NOALLOC);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);
	PCPU_SET(midr, get_midr());

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
	/* Exclude entries needed in the DMAP region, but not phys_avail */
	if (efihdr != NULL)
		exclude_efi_map_entries(efihdr);
	physmem_init_kernel_globals();

	devmap_bootstrap(0, NULL);

	/* Choose FDT vs ACPI; an invalid choice is reported after cninit(). */
	valid = bus_probe();

	cninit();
	set_ttbr0(abp->kern_ttbr0);
	cpu_tlb_flushID();

	/* Console is up, so we can now tell the user about a bad order. */
	if (!valid)
		panic("Invalid bus configuration: %s",
		    kern_getenv("kern.cfg.order"));

	/*
	 * Dump the boot metadata. We have to wait for cninit() since console
	 * output is required. If it's grossly incorrect the kernel will never
	 * make it this far.
	 */
	if (getenv_is_true("debug.dump_modinfo_at_boot"))
		preload_dump();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
	pan_enable();

	kcsan_cpu_init(0);

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (boothowto & RB_VERBOSE) {
		if (efihdr != NULL)
			print_efi_map_entries(efihdr);
		physmem_print_tables();
	}

	/* Early boot is complete. */
	early_boot = 0;
}
1352
/*
 * Initialise self-hosted debug support: clear the OS Lock so debug
 * registers can be written, then set up the debug monitor used by
 * DDB for hardware watchpoints.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(oslar_el1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
1365
1366 #ifdef DDB
1367 #include <ddb/ddb.h>
1368
/*
 * DDB "show specialregs": print the AArch64 system registers that are
 * readable from the kernel, one "name = value" line per register.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
/* Read a system register by name and print it as a 64-bit hex value. */
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	/* Implementation-defined, fault-status, and cache-ID registers. */
	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	/* CPU feature-identification registers. */
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	/* Control, stack-pointer, translation, and thread registers. */
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1443
DB_SHOW_COMMAND(vtop,db_show_vtop)1444 DB_SHOW_COMMAND(vtop, db_show_vtop)
1445 {
1446 uint64_t phys;
1447
1448 if (have_addr) {
1449 phys = arm64_address_translate_s1e1r(addr);
1450 db_printf("EL1 physical address reg (read): 0x%016lx\n", phys);
1451 phys = arm64_address_translate_s1e1w(addr);
1452 db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
1453 phys = arm64_address_translate_s1e0r(addr);
1454 db_printf("EL0 physical address reg (read): 0x%016lx\n", phys);
1455 phys = arm64_address_translate_s1e0w(addr);
1456 db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
1457 } else
1458 db_printf("show vtop <virt_addr>\n");
1459 }
1460 #endif
1461