/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
__SCCSID("@(#)kvm.c	8.2 (Berkeley) 2/13/94");

#include <sys/param.h>
#include <sys/fnv_hash.h>

#define	_WANT_VNET

#include <sys/user.h>
#include <sys/linker.h>
#include <sys/pcpu.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include <net/vnet.h>

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <paths.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "kvm_private.h"

SET_DECLARE(kvm_arch, struct kvm_arch);

static char _kd_is_null[] = "";

char *
kvm_geterr(kvm_t *kd)
{

	if (kd == NULL)
		return (_kd_is_null);
	return (kd->errbuf);
}

static int
_kvm_read_kernel_ehdr(kvm_t *kd)
{
	Elf *elf;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		_kvm_err(kd, kd->program, "Unsupported libelf");
		return (-1);
	}
	elf = elf_begin(kd->nlfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "kernel is not an ELF file");
		return (-1);
	}
	if (gelf_getehdr(elf, &kd->nlehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		elf_end(elf);
		return (-1);
	}
	elf_end(elf);

	switch (kd->nlehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
	case ELFDATA2MSB:
		return (0);
	default:
		_kvm_err(kd, kd->program,
		    "unsupported ELF data encoding for kernel");
		return (-1);
	}
}

static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
{
	struct kvm_arch **parch;
	struct stat st;

	kd->vmfd = -1;
	kd->pmfd = -1;
	kd->nlfd = -1;
	kd->vmst = NULL;
	kd->procbase = NULL;
	kd->argspc = NULL;
	kd->argv = NULL;

	if (uf == NULL)
		uf = getbootfile();
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == NULL)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISREG(st.st_mode) && st.st_size <= 0) {
		errno = EINVAL;
		_kvm_syserr(kd, kd->program, "empty file");
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_DEVNULL) == 0) {
			kd->vmfd = open(_PATH_DEVNULL, O_RDONLY | O_CLOEXEC);
			return (kd);
		} else if (strcmp(mf, _PATH_MEM) == 0) {
			if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC)) <
			    0) {
				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
				goto failed;
			}
			return (kd);
		}
	}

	/*
	 * This is either a crash dump or a remote live system with its
	 * physical memory fully accessible via a special device.
	 * Open the namelist fd and determine the architecture.
	 */
	if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		goto failed;
	}
	if (_kvm_read_kernel_ehdr(kd) < 0)
		goto failed;
	if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0 ||
	    strncmp(mf, _PATH_DEVVMM, strlen(_PATH_DEVVMM)) == 0) {
		kd->rawdump = 1;
		kd->writable = 1;
	}
	SET_FOREACH(parch, kvm_arch) {
		if ((*parch)->ka_probe(kd)) {
			kd->arch = *parch;
			break;
		}
	}
	if (kd->arch == NULL) {
		_kvm_err(kd, kd->program, "unsupported architecture");
		goto failed;
	}

	/*
	 * Non-native kernels require a symbol resolver.
	 */
	if (!kd->arch->ka_native(kd) && kd->resolve_symbol == NULL) {
		_kvm_err(kd, kd->program,
		    "non-native kernel requires a symbol resolver");
		goto failed;
	}

	/*
	 * Initialize the virtual address translation machinery.
	 */
	if (kd->arch->ka_initvtop(kd) < 0)
		goto failed;
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != NULL)
		strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (NULL);
}

kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf __unused, int flag,
    char *errout)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (NULL);
	}
	return (_kvm_open(kd, uf, mf, flag, errout));
}

kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf __unused, int flag,
    const char *errstr)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errstr != NULL)
			(void)fprintf(stderr, "%s: %s\n",
			    errstr, strerror(errno));
		return (NULL);
	}
	kd->program = errstr;
	return (_kvm_open(kd, uf, mf, flag, NULL));
}

kvm_t *
kvm_open2(const char *uf, const char *mf, int flag, char *errout,
    int (*resolver)(const char *, kvaddr_t *))
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (NULL);
	}
	kd->resolve_symbol = resolver;
	return (_kvm_open(kd, uf, mf, flag, errout));
}
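
/*
 * Illustrative, non-authoritative usage sketch; it is not part of the
 * library and is compiled out unless the hypothetical LIBKVM_USAGE_EXAMPLES
 * macro is defined.  It shows the common open/close pattern for
 * kvm_openfiles() above: pass a _POSIX2_LINE_MAX error buffer, use
 * O_RDONLY (or O_RDWR) as the flag, and let NULL file names fall back to
 * getbootfile() and _PATH_MEM.  A standalone consumer would include
 * <kvm.h>, <fcntl.h>, <limits.h> and <stdio.h> and link with -lkvm.
 */
#ifdef LIBKVM_USAGE_EXAMPLES
static int
example_open_close(const char *kernel, const char *core)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	/* NULL kernel/core select the running kernel and /dev/mem. */
	kd = kvm_openfiles(kernel, core, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (-1);
	}

	/* ... kvm_nlist2()/kvm_read2() calls would go here ... */

	return (kvm_close(kd));
}
#endif /* LIBKVM_USAGE_EXAMPLES */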

int
kvm_close(kvm_t *kd)
{
	int error = 0;

	if (kd == NULL) {
		errno = EINVAL;
		return (-1);
	}
	if (kd->vmst != NULL)
		kd->arch->ka_freevtop(kd);
	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->procbase != 0)
		free((void *)kd->procbase);
	if (kd->argbuf != 0)
		free((void *)kd->argbuf);
	if (kd->argspc != 0)
		free((void *)kd->argspc);
	if (kd->argv != 0)
		free((void *)kd->argv);
	if (kd->pt_map != NULL)
		free(kd->pt_map);
	if (kd->page_map != NULL)
		free(kd->page_map);
	if (kd->sparse_map != MAP_FAILED)
		munmap(kd->sparse_map, kd->pt_sparse_size);
	free((void *)kd);

	return (error);
}

int
kvm_nlist2(kvm_t *kd, struct kvm_nlist *nl)
{

	/*
	 * If called via the public interface, permit initialization of
	 * further virtualized modules on demand.
	 */
	return (_kvm_nlist(kd, nl, 1));
}

int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct kvm_nlist *kl;
	int count, i, nfail;

	/*
	 * Avoid reporting truncated addresses by failing for non-native
	 * cores.
	 */
	if (!kvm_native(kd)) {
		_kvm_err(kd, kd->program, "kvm_nlist of non-native vmcore");
		return (-1);
	}

	for (count = 0; nl[count].n_name != NULL && nl[count].n_name[0] != '\0';
	     count++)
		;
	if (count == 0)
		return (0);
	kl = calloc(count + 1, sizeof(*kl));
	if (kl == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate memory");
		return (-1);
	}
	for (i = 0; i < count; i++)
		kl[i].n_name = nl[i].n_name;
	nfail = kvm_nlist2(kd, kl);
	for (i = 0; i < count; i++) {
		nl[i].n_type = kl[i].n_type;
		nl[i].n_other = 0;
		nl[i].n_desc = 0;
		nl[i].n_value = kl[i].n_value;
	}
	free(kl);
	return (nfail);
}
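
/*
 * Illustrative sketch of kvm_nlist2() above, guarded by the same
 * hypothetical LIBKVM_USAGE_EXAMPLES macro.  The struct kvm_nlist array is
 * terminated by a NULL (or empty) n_name, and kvm_nlist2() returns the
 * number of entries it could not resolve (or -1 on error), filling in
 * n_value for the ones it found.
 */
#ifdef LIBKVM_USAGE_EXAMPLES
static int
example_lookup_symbol(kvm_t *kd, const char *name, kvaddr_t *addrp)
{
	struct kvm_nlist nl[2];

	memset(nl, 0, sizeof(nl));
	nl[0].n_name = name;	/* symbol to resolve, e.g. "allproc" */
	nl[1].n_name = NULL;	/* NULL name terminates the list */

	if (kvm_nlist2(kd, nl) != 0)
		return (-1);
	*addrp = nl[0].n_value;
	return (0);
}
#endif /* LIBKVM_USAGE_EXAMPLES */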

ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{

	return (kvm_read2(kd, kva, buf, len));
}

ssize_t
kvm_read2(kvm_t *kd, kvaddr_t kva, void *buf, size_t len)
{
	int cc;
	ssize_t cr;
	off_t pa;
	char *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (0x%jx)",
			    (uintmax_t)kva);
			return (-1);
		}
		cr = read(kd->vmfd, buf, len);
		if (cr < 0) {
			_kvm_syserr(kd, 0, "kvm_read");
			return (-1);
		} else if (cr < (ssize_t)len)
			_kvm_err(kd, kd->program, "short read");
		return (cr);
	}

	cp = buf;
	while (len > 0) {
		cc = kd->arch->ka_kvatop(kd, kva, &pa);
		if (cc == 0)
			return (-1);
		if (cc > (ssize_t)len)
			cc = len;
		errno = 0;
		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
			_kvm_syserr(kd, 0, _PATH_MEM);
			break;
		}
		cr = read(kd->pmfd, cp, cc);
		if (cr < 0) {
			_kvm_syserr(kd, kd->program, "kvm_read");
			break;
		}
		/*
		 * If ka_kvatop returns a bogus value or our core file is
		 * truncated, we might wind up seeking beyond the end of the
		 * core file in which case the read will return 0 (EOF).
		 */
		if (cr == 0)
			break;
		cp += cr;
		kva += cr;
		len -= cr;
	}

	return (cp - (char *)buf);
}
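
/*
 * Illustrative sketch combining symbol lookup and kvm_read2(), using the
 * example_lookup_symbol() helper from the sketch above and guarded by the
 * hypothetical LIBKVM_USAGE_EXAMPLES macro.  On a live kernel the read goes
 * through /dev/kmem; on a vmcore it goes through the architecture back
 * end's kvatop translation.  A short read shows up as a return value
 * smaller than the requested length.
 */
#ifdef LIBKVM_USAGE_EXAMPLES
static int
example_read_int(kvm_t *kd, const char *name, int *valuep)
{
	kvaddr_t addr;

	if (example_lookup_symbol(kd, name, &addr) != 0)
		return (-1);
	if (kvm_read2(kd, addr, valuep, sizeof(*valuep)) !=
	    (ssize_t)sizeof(*valuep)) {
		fprintf(stderr, "kvm_read2: %s\n", kvm_geterr(kd));
		return (-1);
	}
	return (0);
}
#endif /* LIBKVM_USAGE_EXAMPLES */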

ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
	int cc;
	ssize_t cw;
	off_t pa;
	const char *cp;

	if (!ISALIVE(kd) && !kd->writable) {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}

	if (ISALIVE(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		}
		cc = write(kd->vmfd, buf, len);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_write");
			return (-1);
		} else if ((size_t)cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	}

	cp = buf;
	while (len > 0) {
		cc = kd->arch->ka_kvatop(kd, kva, &pa);
		if (cc == 0)
			return (-1);
		if (cc > (ssize_t)len)
			cc = len;
		errno = 0;
		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
			_kvm_syserr(kd, 0, _PATH_MEM);
			break;
		}
		cw = write(kd->pmfd, cp, cc);
		if (cw < 0) {
			_kvm_syserr(kd, kd->program, "kvm_write");
			break;
		}
		/*
		 * If ka_kvatop returns a bogus value or our core file is
		 * truncated, we might wind up seeking beyond the end of the
		 * core file in which case the write will return 0.
		 */
		if (cw == 0)
			break;
		cp += cw;
		kva += cw;
		len -= cw;
	}

	return (cp - (const char *)buf);
}

int
kvm_native(kvm_t *kd)
{

	if (ISALIVE(kd))
		return (1);
	return (kd->arch->ka_native(kd));
}

int
kvm_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *closure)
{

	if (kd->arch->ka_walk_pages == NULL)
		return (0);

	return (kd->arch->ka_walk_pages(kd, cb, closure));
}
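
/*
 * Illustrative sketch for kvm_walk_pages() above, guarded by the
 * hypothetical LIBKVM_USAGE_EXAMPLES macro.  The callback type and struct
 * kvm_page come from <kvm.h>; this sketch only counts the pages it is
 * handed and assumes a nonzero callback return keeps the walk going.  Note
 * that kvm_walk_pages() returns 0 when the back end provides no walker.
 */
#ifdef LIBKVM_USAGE_EXAMPLES
static int
example_count_page(struct kvm_page *kp __unused, void *arg)
{
	u_long *counter = arg;

	(*counter)++;
	return (1);		/* keep walking */
}

static u_long
example_count_pages(kvm_t *kd)
{
	u_long count = 0;

	(void)kvm_walk_pages(kd, example_count_page, &count);
	return (count);
}
#endif /* LIBKVM_USAGE_EXAMPLES */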