/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/endian.h>
#include <kvm.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#ifdef __arm__
#include <machine/vmparam.h>
#endif

#include "kvm_private.h"
#include "kvm_arm.h"

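/*
 * Private state kept per open descriptor: a copy of the kernel's L1
 * translation table and the crash dump's ELF program headers, both
 * cached by _arm_initvtop() and used for address translation below.
 */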
struct vmstate {
	arm_pd_entry_t *l1pt;
	size_t phnum;
	GElf_Phdr *phdr;
};

/*
 * Translate a physical memory address to a file offset in the crash dump.
 * On success, store the offset in *ofs and return the number of bytes that
 * can be read contiguously: the rest of the containing segment when pgsz is
 * zero, otherwise the rest of the enclosing pgsz-sized page.  Return 0 if
 * the address is not backed by any dump segment.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	struct vmstate *vm = kd->vmst;
	GElf_Phdr *p;
	size_t n;

	p = vm->phdr;
	n = vm->phnum;
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

static void
_arm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm->phdr);
	free(vm);
	kd->vmst = NULL;
}

static int
_arm_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) &&
	    !_kvm_is_minidump(kd));
}

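/*
 * Set up virtual-to-physical translation for a full ARM vmcore: find the
 * kernel's virtual and physical load addresses (from the PT_DUMP_DELTA
 * program header if present, otherwise via the kernbase and physaddr
 * symbols), then read kernel_l1pa and cache a copy of the L1 page table.
 */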
static int
_arm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct kvm_nlist nl[2];
	kvaddr_t kernbase;
	arm_physaddr_t physaddr, pa;
	arm_pd_entry_t *l1pt;
	size_t i;
	int found;

	if (kd->rawdump) {
		_kvm_err(kd, kd->program, "raw dumps not supported on arm");
		return (-1);
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;

	if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1)
		return (-1);

	found = 0;
	for (i = 0; i < vm->phnum; i++) {
		if (vm->phdr[i].p_type == PT_DUMP_DELTA) {
			kernbase = vm->phdr[i].p_vaddr;
			physaddr = vm->phdr[i].p_paddr;
			found = 1;
			break;
		}
	}

	nl[1].n_name = NULL;
	if (!found) {
		nl[0].n_name = "kernbase";
		if (kvm_nlist2(kd, nl) != 0) {
#ifdef __arm__
			kernbase = KERNBASE;
#else
			_kvm_err(kd, kd->program, "cannot resolve kernbase");
			return (-1);
#endif
		} else
			kernbase = nl[0].n_value;

		nl[0].n_name = "physaddr";
		if (kvm_nlist2(kd, nl) != 0) {
			_kvm_err(kd, kd->program, "couldn't get phys addr");
			return (-1);
		}
		physaddr = nl[0].n_value;
	}
	nl[0].n_name = "kernel_l1pa";
	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return (0);
}

/* from arm/pmap.c */
#define	ARM_L1_IDX(va)		((va) >> ARM_L1_S_SHIFT)

#define	l1pte_section_p(pde)	(((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S)
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_index(v)		(((v) & ARM_L1_S_OFFSET) >> ARM_L2_S_SHIFT)

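/*
 * Translate a kernel virtual address into an offset in the crash dump by
 * walking the two-level ARM page tables: a valid L1 entry is either a 1MB
 * section mapping or points to an L2 table, whose entries map 64KB large
 * pages or 4KB small pages.
 */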
static int
_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	arm_pd_entry_t pd;
	arm_pt_entry_t pte;
	arm_physaddr_t pte_pa;
	off_t pte_off;

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
	pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
	}
	/* Coarse mapping: read the L2 PTE straight from the dump. */
	pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	_kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE);
	if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
		goto invalid;
	}
	pte = _kvm32toh(kd, pte);
	if (!l2pte_valid(pte)) {
		goto invalid;
	}
	if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
		/* 64KB large page. */
		*pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
	}
	/* 4KB small page. */
	*pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
	return (0);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
#ifdef FBSD_NOT_YET
int
_kvm_mdopen(kvm_t *kd)
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}
#endif

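/*
 * Report whether the dump matches the host's architecture and byte order,
 * i.e. whether its contents can be used without byte swapping.
 */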
int
#ifdef __arm__
_arm_native(kvm_t *kd)
#else
_arm_native(kvm_t *kd __unused)
#endif
{

#ifdef __arm__
#if _BYTE_ORDER == _LITTLE_ENDIAN
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
#else
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
#endif
#else
	return (0);
#endif
}

static struct kvm_arch kvm_arm = {
	.ka_probe = _arm_probe,
	.ka_initvtop = _arm_initvtop,
	.ka_freevtop = _arm_freevtop,
	.ka_kvatop = _arm_kvatop,
	.ka_native = _arm_native,
};

KVM_ARCH(kvm_arm);