1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2011 NetApp, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/capsicum.h>
32 #include <sys/sysctl.h>
33 #include <sys/ioctl.h>
34 #include <sys/linker.h>
35 #include <sys/mman.h>
36 #include <sys/module.h>
37 #include <sys/_iovec.h>
38 #include <sys/cpuset.h>
39
40 #include <capsicum_helpers.h>
41 #include <errno.h>
42 #include <stdbool.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <assert.h>
46 #include <string.h>
47 #include <fcntl.h>
48 #include <unistd.h>
49
50 #include <libutil.h>
51
52 #include <vm/vm.h>
53 #include <machine/vmm.h>
54 #include <machine/vmm_dev.h>
55 #include <machine/vmm_snapshot.h>
56
57 #include "vmmapi.h"
58 #include "internal.h"
59
60 #define MB (1024 * 1024UL)
61 #define GB (1024 * 1024 * 1024UL)
62
63 /*
64 * Size of the guard region before and after the virtual address space
65 * mapping the guest physical memory. This must be a multiple of the
66 * superpage size for performance reasons.
67 */
68 #define VM_MMAP_GUARD_SIZE (4 * MB)
69
70 #define PROT_RW (PROT_READ | PROT_WRITE)
71 #define PROT_ALL (PROT_READ | PROT_WRITE | PROT_EXEC)
72
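/*
 * Userspace handle for a virtual machine: the vmm device descriptor plus a
 * cached description of the guest memory layout.
 */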
73 struct vmctx {
74 int fd;
75 uint32_t lowmem_limit;
76 int memflags;
77 size_t lowmem;
78 size_t highmem;
79 char *baseaddr;
80 char *name;
81 };
82
83 #define CREATE(x) sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
84 #define DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
85
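/* Open the guest's /dev/vmm/<name> device node and return its descriptor. */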
86 static int
vm_device_open(const char *name)
88 {
89 int fd, len;
90 char *vmfile;
91
92 len = strlen("/dev/vmm/") + strlen(name) + 1;
93 vmfile = malloc(len);
94 assert(vmfile != NULL);
95 snprintf(vmfile, len, "/dev/vmm/%s", name);
96
97 /* Open the device file */
98 fd = open(vmfile, O_RDWR, 0);
99
100 free(vmfile);
101 return (fd);
102 }
103
104 int
vm_create(const char *name)
106 {
107 /* Try to load vmm(4) module before creating a guest. */
108 if (modfind("vmm") < 0)
109 kldload("vmm");
110 return (CREATE(name));
111 }
112
113 struct vmctx *
vm_open(const char *name)
115 {
116 struct vmctx *vm;
117 int saved_errno;
118
119 vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
120 assert(vm != NULL);
121
122 vm->fd = -1;
123 vm->memflags = 0;
124 vm->lowmem_limit = 3 * GB;
125 vm->name = (char *)(vm + 1);
126 strcpy(vm->name, name);
127
128 if ((vm->fd = vm_device_open(vm->name)) < 0)
129 goto err;
130
131 return (vm);
132 err:
133 saved_errno = errno;
134 free(vm);
135 errno = saved_errno;
136 return (NULL);
137 }
138
139 void
vm_close(struct vmctx *vm)
141 {
142 assert(vm != NULL);
143
144 close(vm->fd);
145 free(vm);
146 }
147
148 void
vm_destroy(struct vmctx *vm)
150 {
151 assert(vm != NULL);
152
153 if (vm->fd >= 0)
154 close(vm->fd);
155 DESTROY(vm->name);
156
157 free(vm);
158 }
159
160 struct vcpu *
vm_vcpu_open(struct vmctx *ctx, int vcpuid)
162 {
163 struct vcpu *vcpu;
164
165 vcpu = malloc(sizeof(*vcpu));
166 vcpu->ctx = ctx;
167 vcpu->vcpuid = vcpuid;
168 return (vcpu);
169 }
170
171 void
vm_vcpu_close(struct vcpu *vcpu)
173 {
174 free(vcpu);
175 }
176
177 int
vcpu_id(struct vcpu *vcpu)
179 {
180 return (vcpu->vcpuid);
181 }
182
183 int
vm_parse_memsize(const char *opt, size_t *ret_memsize)
185 {
186 char *endptr;
187 size_t optval;
188 int error;
189
190 optval = strtoul(opt, &endptr, 0);
191 if (*opt != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility, if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
197 if (optval < MB)
198 optval *= MB;
199 *ret_memsize = optval;
200 error = 0;
201 } else
202 error = expand_number(opt, ret_memsize);
203
204 return (error);
205 }
206
207 uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
209 {
210
211 return (ctx->lowmem_limit);
212 }
213
214 void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
216 {
217
218 ctx->lowmem_limit = limit;
219 }
220
221 void
vm_set_memflags(struct vmctx *ctx, int flags)
223 {
224
225 ctx->memflags = flags;
226 }
227
228 int
vm_get_memflags(struct vmctx *ctx)
230 {
231
232 return (ctx->memflags);
233 }
234
235 /*
236 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
237 */
238 int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
241 {
242 struct vm_memmap memmap;
243 int error, flags;
244
245 memmap.gpa = gpa;
246 memmap.segid = segid;
247 memmap.segoff = off;
248 memmap.len = len;
249 memmap.prot = prot;
250 memmap.flags = 0;
251
252 if (ctx->memflags & VM_MEM_F_WIRED)
253 memmap.flags |= VM_MEMMAP_F_WIRED;
254
255 /*
256 * If this mapping already exists then don't create it again. This
257 * is the common case for SYSMEM mappings created by bhyveload(8).
258 */
259 error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
260 if (error == 0 && gpa == memmap.gpa) {
261 if (segid != memmap.segid || off != memmap.segoff ||
262 prot != memmap.prot || flags != memmap.flags) {
263 errno = EEXIST;
264 return (-1);
265 } else {
266 return (0);
267 }
268 }
269
270 error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
271 return (error);
272 }
273
274 int
vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
    size_t *lowmem_size, size_t *highmem_size)
277 {
278
279 *guest_baseaddr = ctx->baseaddr;
280 *lowmem_size = ctx->lowmem;
281 *highmem_size = ctx->highmem;
282 return (0);
283 }
284
285 int
vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
287 {
288 struct vm_munmap munmap;
289 int error;
290
291 munmap.gpa = gpa;
292 munmap.len = len;
293
294 error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
295 return (error);
296 }
297
298 int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
301 {
302 struct vm_memmap memmap;
303 int error;
304
305 bzero(&memmap, sizeof(struct vm_memmap));
306 memmap.gpa = *gpa;
307 error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
308 if (error == 0) {
309 *gpa = memmap.gpa;
310 *segid = memmap.segid;
311 *segoff = memmap.segoff;
312 *len = memmap.len;
313 *prot = memmap.prot;
314 *flags = memmap.flags;
315 }
316 return (error);
317 }
318
319 /*
320 * Return 0 if the segments are identical and non-zero otherwise.
321 *
322 * This is slightly complicated by the fact that only device memory segments
323 * are named.
324 */
325 static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
327 {
328
329 if (len == len2) {
330 if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
331 return (0);
332 }
333 return (-1);
334 }
335
336 static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
338 {
339 struct vm_memseg memseg;
340 size_t n;
341 int error;
342
343 /*
344 * If the memory segment has already been created then just return.
345 * This is the usual case for the SYSMEM segment created by userspace
346 * loaders like bhyveload(8).
347 */
348 error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
349 sizeof(memseg.name));
350 if (error)
351 return (error);
352
353 if (memseg.len != 0) {
354 if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
355 errno = EINVAL;
356 return (-1);
357 } else {
358 return (0);
359 }
360 }
361
362 bzero(&memseg, sizeof(struct vm_memseg));
363 memseg.segid = segid;
364 memseg.len = len;
365 if (name != NULL) {
366 n = strlcpy(memseg.name, name, sizeof(memseg.name));
367 if (n >= sizeof(memseg.name)) {
368 errno = ENAMETOOLONG;
369 return (-1);
370 }
371 }
372
373 error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
374 return (error);
375 }
376
377 int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
380 {
381 struct vm_memseg memseg;
382 size_t n;
383 int error;
384
385 memseg.segid = segid;
386 error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
387 if (error == 0) {
388 *lenp = memseg.len;
389 n = strlcpy(namebuf, memseg.name, bufsize);
390 if (n >= bufsize) {
391 errno = ENAMETOOLONG;
392 error = -1;
393 }
394 }
395 return (error);
396 }
397
398 static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
400 {
401 char *ptr;
402 int error, flags;
403
404 /* Map 'len' bytes starting at 'gpa' in the guest address space */
405 error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
406 if (error)
407 return (error);
408
409 flags = MAP_SHARED | MAP_FIXED;
410 if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
411 flags |= MAP_NOCORE;
412
413 /* mmap into the process address space on the host */
414 ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
415 if (ptr == MAP_FAILED)
416 return (-1);
417
418 return (0);
419 }
420
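/*
 * Allocate the system memory segment and map the guest's lowmem (and, if
 * needed, highmem above 4GB) into the host address space, bracketed by
 * guard regions.
 */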
421 int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
423 {
424 size_t objsize, len;
425 vm_paddr_t gpa;
426 char *baseaddr, *ptr;
427 int error;
428
429 assert(vms == VM_MMAP_ALL);
430
431 /*
432 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
433 * create another 'highmem' segment above 4GB for the remainder.
434 */
435 if (memsize > ctx->lowmem_limit) {
436 ctx->lowmem = ctx->lowmem_limit;
437 ctx->highmem = memsize - ctx->lowmem_limit;
438 objsize = 4*GB + ctx->highmem;
439 } else {
440 ctx->lowmem = memsize;
441 ctx->highmem = 0;
442 objsize = ctx->lowmem;
443 }
444
445 error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
446 if (error)
447 return (error);
448
449 /*
450 * Stake out a contiguous region covering the guest physical memory
451 * and the adjoining guard regions.
452 */
453 len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
454 ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
455 if (ptr == MAP_FAILED)
456 return (-1);
457
458 baseaddr = ptr + VM_MMAP_GUARD_SIZE;
459 if (ctx->highmem > 0) {
460 gpa = 4*GB;
461 len = ctx->highmem;
462 error = setup_memory_segment(ctx, gpa, len, baseaddr);
463 if (error)
464 return (error);
465 }
466
467 if (ctx->lowmem > 0) {
468 gpa = 0;
469 len = ctx->lowmem;
470 error = setup_memory_segment(ctx, gpa, len, baseaddr);
471 if (error)
472 return (error);
473 }
474
475 ctx->baseaddr = baseaddr;
476
477 return (0);
478 }
479
/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular, return NULL if [gaddr, gaddr+len) falls in the guest MMIO
 * region. The instruction emulation code depends on this behavior.
 */
487 void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
489 {
490
491 if (ctx->lowmem > 0) {
492 if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
493 gaddr + len <= ctx->lowmem)
494 return (ctx->baseaddr + gaddr);
495 }
496
497 if (ctx->highmem > 0) {
498 if (gaddr >= 4*GB) {
499 if (gaddr < 4*GB + ctx->highmem &&
500 len <= ctx->highmem &&
501 gaddr + len <= 4*GB + ctx->highmem)
502 return (ctx->baseaddr + gaddr);
503 }
504 }
505
506 return (NULL);
507 }
508
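/*
 * Translate a host address within the guest mapping back to a guest physical
 * address. Returns (vm_paddr_t)-1 if the address is outside the mapping.
 */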
509 vm_paddr_t
vm_rev_map_gpa(struct vmctx *ctx, void *addr)
511 {
512 vm_paddr_t offaddr;
513
514 offaddr = (char *)addr - ctx->baseaddr;
515
516 if (ctx->lowmem > 0)
517 if (offaddr <= ctx->lowmem)
518 return (offaddr);
519
520 if (ctx->highmem > 0)
521 if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
522 return (offaddr);
523
524 return ((vm_paddr_t)-1);
525 }
526
527 const char *
vm_get_name(struct vmctx *ctx)
529 {
530
531 return (ctx->name);
532 }
533
534 size_t
vm_get_lowmem_size(struct vmctx *ctx)
536 {
537
538 return (ctx->lowmem);
539 }
540
541 size_t
vm_get_highmem_size(struct vmctx *ctx)
543 {
544
545 return (ctx->highmem);
546 }
547
548 void *
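/*
 * Create a device memory segment named 'name' and map it into the host
 * address space. Returns a pointer to the mapping or MAP_FAILED on error.
 */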
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
550 {
551 char pathname[MAXPATHLEN];
552 size_t len2;
553 char *base, *ptr;
554 int fd, error, flags;
555
556 fd = -1;
557 ptr = MAP_FAILED;
558 if (name == NULL || strlen(name) == 0) {
559 errno = EINVAL;
560 goto done;
561 }
562
563 error = vm_alloc_memseg(ctx, segid, len, name);
564 if (error)
565 goto done;
566
567 strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
568 strlcat(pathname, ctx->name, sizeof(pathname));
569 strlcat(pathname, ".", sizeof(pathname));
570 strlcat(pathname, name, sizeof(pathname));
571
572 fd = open(pathname, O_RDWR);
573 if (fd < 0)
574 goto done;
575
576 /*
577 * Stake out a contiguous region covering the device memory and the
578 * adjoining guard regions.
579 */
580 len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
581 base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
582 0);
583 if (base == MAP_FAILED)
584 goto done;
585
586 flags = MAP_SHARED | MAP_FIXED;
587 if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
588 flags |= MAP_NOCORE;
589
590 /* mmap the devmem region in the host address space */
591 ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
592 done:
593 if (fd >= 0)
594 close(fd);
595 return (ptr);
596 }
597
598 static int
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
600 {
601 /*
602 * XXX: fragile, handle with care
603 * Assumes that the first field of the ioctl data
604 * is the vcpuid.
605 */
606 *(int *)arg = vcpu->vcpuid;
607 return (ioctl(vcpu->ctx->fd, cmd, arg));
608 }
609
610 int
vm_set_desc(struct vcpu *vcpu, int reg,
    uint64_t base, uint32_t limit, uint32_t access)
613 {
614 int error;
615 struct vm_seg_desc vmsegdesc;
616
617 bzero(&vmsegdesc, sizeof(vmsegdesc));
618 vmsegdesc.regnum = reg;
619 vmsegdesc.desc.base = base;
620 vmsegdesc.desc.limit = limit;
621 vmsegdesc.desc.access = access;
622
623 error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
624 return (error);
625 }
626
627 int
vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit,
    uint32_t *access)
630 {
631 int error;
632 struct vm_seg_desc vmsegdesc;
633
634 bzero(&vmsegdesc, sizeof(vmsegdesc));
635 vmsegdesc.regnum = reg;
636
637 error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
638 if (error == 0) {
639 *base = vmsegdesc.desc.base;
640 *limit = vmsegdesc.desc.limit;
641 *access = vmsegdesc.desc.access;
642 }
643 return (error);
644 }
645
646 int
vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc)
648 {
649 int error;
650
651 error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit,
652 &seg_desc->access);
653 return (error);
654 }
655
656 int
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
658 {
659 int error;
660 struct vm_register vmreg;
661
662 bzero(&vmreg, sizeof(vmreg));
663 vmreg.regnum = reg;
664 vmreg.regval = val;
665
666 error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
667 return (error);
668 }
669
670 int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
672 {
673 int error;
674 struct vm_register vmreg;
675
676 bzero(&vmreg, sizeof(vmreg));
677 vmreg.regnum = reg;
678
679 error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
680 *ret_val = vmreg.regval;
681 return (error);
682 }
683
684 int
vm_set_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
687 {
688 int error;
689 struct vm_register_set vmregset;
690
691 bzero(&vmregset, sizeof(vmregset));
692 vmregset.count = count;
693 vmregset.regnums = regnums;
694 vmregset.regvals = regvals;
695
696 error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
697 return (error);
698 }
699
700 int
vm_get_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
703 {
704 int error;
705 struct vm_register_set vmregset;
706
707 bzero(&vmregset, sizeof(vmregset));
708 vmregset.count = count;
709 vmregset.regnums = regnums;
710 vmregset.regvals = regvals;
711
712 error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
713 return (error);
714 }
715
716 int
vm_run(struct vcpu *vcpu, struct vm_run *vmrun)
718 {
719 return (vcpu_ioctl(vcpu, VM_RUN, vmrun));
720 }
721
722 int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
724 {
725 struct vm_suspend vmsuspend;
726
727 bzero(&vmsuspend, sizeof(vmsuspend));
728 vmsuspend.how = how;
729 return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
730 }
731
732 int
vm_reinit(struct vmctx *ctx)
734 {
735
736 return (ioctl(ctx->fd, VM_REINIT, 0));
737 }
738
739 int
vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
742 {
743 struct vm_exception exc;
744
745 exc.vector = vector;
746 exc.error_code = errcode;
747 exc.error_code_valid = errcode_valid;
748 exc.restart_instruction = restart_instruction;
749
750 return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc));
751 }
752
753 int
vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid)
755 {
756 /*
757 * The apic id associated with the 'vcpu' has the same numerical value
758 * as the 'vcpu' itself.
759 */
760 return (apicid);
761 }
762
763 int
vm_lapic_irq(struct vcpu *vcpu, int vector)
765 {
766 struct vm_lapic_irq vmirq;
767
768 bzero(&vmirq, sizeof(vmirq));
769 vmirq.vector = vector;
770
771 return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq));
772 }
773
774 int
vm_lapic_local_irq(struct vcpu *vcpu, int vector)
776 {
777 struct vm_lapic_irq vmirq;
778
779 bzero(&vmirq, sizeof(vmirq));
780 vmirq.vector = vector;
781
782 return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq));
783 }
784
785 int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
787 {
788 struct vm_lapic_msi vmmsi;
789
790 bzero(&vmmsi, sizeof(vmmsi));
791 vmmsi.addr = addr;
792 vmmsi.msg = msg;
793
794 return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
795 }
796
797 int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
799 {
800 struct vm_ioapic_irq ioapic_irq;
801
802 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
803 ioapic_irq.irq = irq;
804
805 return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
806 }
807
808 int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
810 {
811 struct vm_ioapic_irq ioapic_irq;
812
813 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
814 ioapic_irq.irq = irq;
815
816 return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
817 }
818
819 int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
821 {
822 struct vm_ioapic_irq ioapic_irq;
823
824 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
825 ioapic_irq.irq = irq;
826
827 return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
828 }
829
830 int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
832 {
833
834 return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
835 }
836
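/*
 * Read or write a kernel-emulated device register at 'gpa'. The access width
 * is encoded as log2 of the access size in bytes.
 */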
837 int
vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa,
    bool write, int size, uint64_t *value)
840 {
841 struct vm_readwrite_kernemu_device irp = {
842 .access_width = fls(size) - 1,
843 .gpa = gpa,
844 .value = write ? *value : ~0ul,
845 };
846 long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
847 int rc;
848
849 rc = vcpu_ioctl(vcpu, cmd, &irp);
850 if (rc == 0 && !write)
851 *value = irp.value;
852 return (rc);
853 }
854
855 int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
857 {
858 struct vm_isa_irq isa_irq;
859
860 bzero(&isa_irq, sizeof(struct vm_isa_irq));
861 isa_irq.atpic_irq = atpic_irq;
862 isa_irq.ioapic_irq = ioapic_irq;
863
864 return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
865 }
866
867 int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
869 {
870 struct vm_isa_irq isa_irq;
871
872 bzero(&isa_irq, sizeof(struct vm_isa_irq));
873 isa_irq.atpic_irq = atpic_irq;
874 isa_irq.ioapic_irq = ioapic_irq;
875
876 return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
877 }
878
879 int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
881 {
882 struct vm_isa_irq isa_irq;
883
884 bzero(&isa_irq, sizeof(struct vm_isa_irq));
885 isa_irq.atpic_irq = atpic_irq;
886 isa_irq.ioapic_irq = ioapic_irq;
887
888 return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
889 }
890
891 int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
894 {
895 struct vm_isa_irq_trigger isa_irq_trigger;
896
897 bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
898 isa_irq_trigger.atpic_irq = atpic_irq;
899 isa_irq_trigger.trigger = trigger;
900
901 return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
902 }
903
904 int
vm_inject_nmi(struct vcpu *vcpu)
906 {
907 struct vm_nmi vmnmi;
908
909 bzero(&vmnmi, sizeof(vmnmi));
910
911 return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi));
912 }
913
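/* Human-readable names for the vm_cap_type capability values. */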
914 static const char *capstrmap[] = {
915 [VM_CAP_HALT_EXIT] = "hlt_exit",
916 [VM_CAP_MTRAP_EXIT] = "mtrap_exit",
917 [VM_CAP_PAUSE_EXIT] = "pause_exit",
918 [VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
919 [VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
920 [VM_CAP_BPT_EXIT] = "bpt_exit",
921 [VM_CAP_RDPID] = "rdpid",
922 [VM_CAP_RDTSCP] = "rdtscp",
923 [VM_CAP_IPI_EXIT] = "ipi_exit",
924 [VM_CAP_MASK_HWINTR] = "mask_hwintr",
925 [VM_CAP_RFLAGS_TF] = "rflags_tf",
926 };
927
928 int
vm_capability_name2type(const char *capname)
930 {
931 int i;
932
933 for (i = 0; i < (int)nitems(capstrmap); i++) {
934 if (strcmp(capstrmap[i], capname) == 0)
935 return (i);
936 }
937
938 return (-1);
939 }
940
941 const char *
vm_capability_type2name(int type)
943 {
944 if (type >= 0 && type < (int)nitems(capstrmap))
945 return (capstrmap[type]);
946
947 return (NULL);
948 }
949
950 int
vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval)
952 {
953 int error;
954 struct vm_capability vmcap;
955
956 bzero(&vmcap, sizeof(vmcap));
957 vmcap.captype = cap;
958
959 error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
960 *retval = vmcap.capval;
961 return (error);
962 }
963
964 int
vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
966 {
967 struct vm_capability vmcap;
968
969 bzero(&vmcap, sizeof(vmcap));
970 vmcap.captype = cap;
971 vmcap.capval = val;
972
973 return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
974 }
975
976 int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
978 {
979 struct vm_pptdev pptdev;
980
981 bzero(&pptdev, sizeof(pptdev));
982 pptdev.bus = bus;
983 pptdev.slot = slot;
984 pptdev.func = func;
985
986 return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
987 }
988
989 int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
991 {
992 struct vm_pptdev pptdev;
993
994 bzero(&pptdev, sizeof(pptdev));
995 pptdev.bus = bus;
996 pptdev.slot = slot;
997 pptdev.func = func;
998
999 return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
1000 }
1001
1002 int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
1005 {
1006 struct vm_pptdev_mmio pptmmio;
1007
1008 bzero(&pptmmio, sizeof(pptmmio));
1009 pptmmio.bus = bus;
1010 pptmmio.slot = slot;
1011 pptmmio.func = func;
1012 pptmmio.gpa = gpa;
1013 pptmmio.len = len;
1014 pptmmio.hpa = hpa;
1015
1016 return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
1017 }
1018
1019 int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len)
1022 {
1023 struct vm_pptdev_mmio pptmmio;
1024
1025 bzero(&pptmmio, sizeof(pptmmio));
1026 pptmmio.bus = bus;
1027 pptmmio.slot = slot;
1028 pptmmio.func = func;
1029 pptmmio.gpa = gpa;
1030 pptmmio.len = len;
1031
1032 return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
1033 }
1034
1035 int
vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
1038 {
1039 struct vm_pptdev_msi pptmsi;
1040
1041 bzero(&pptmsi, sizeof(pptmsi));
1042 pptmsi.bus = bus;
1043 pptmsi.slot = slot;
1044 pptmsi.func = func;
1045 pptmsi.msg = msg;
1046 pptmsi.addr = addr;
1047 pptmsi.numvec = numvec;
1048
1049 return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
1050 }
1051
1052 int
vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
1055 {
1056 struct vm_pptdev_msix pptmsix;
1057
1058 bzero(&pptmsix, sizeof(pptmsix));
1059 pptmsix.bus = bus;
1060 pptmsix.slot = slot;
1061 pptmsix.func = func;
1062 pptmsix.idx = idx;
1063 pptmsix.msg = msg;
1064 pptmsix.addr = addr;
1065 pptmsix.vector_control = vector_control;
1066
1067 return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
1068 }
1069
1070 int
vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
1072 {
1073 struct vm_pptdev ppt;
1074
1075 bzero(&ppt, sizeof(ppt));
1076 ppt.bus = bus;
1077 ppt.slot = slot;
1078 ppt.func = func;
1079
1080 return ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt);
1081 }
1082
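/*
 * Fetch all statistics for a vcpu, growing a thread-local buffer as needed.
 * Returns a pointer to the buffer, or NULL on failure.
 */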
1083 uint64_t *
vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
    int *ret_entries)
1086 {
1087 static _Thread_local uint64_t *stats_buf;
1088 static _Thread_local u_int stats_count;
1089 uint64_t *new_stats;
1090 struct vm_stats vmstats;
1091 u_int count, index;
1092 bool have_stats;
1093
1094 have_stats = false;
1095 count = 0;
1096 for (index = 0;; index += nitems(vmstats.statbuf)) {
1097 vmstats.index = index;
1098 if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0)
1099 break;
1100 if (stats_count < index + vmstats.num_entries) {
1101 new_stats = realloc(stats_buf,
1102 (index + vmstats.num_entries) * sizeof(uint64_t));
1103 if (new_stats == NULL) {
1104 errno = ENOMEM;
1105 return (NULL);
1106 }
1107 stats_count = index + vmstats.num_entries;
1108 stats_buf = new_stats;
1109 }
1110 memcpy(stats_buf + index, vmstats.statbuf,
1111 vmstats.num_entries * sizeof(uint64_t));
1112 count += vmstats.num_entries;
1113 have_stats = true;
1114
1115 if (vmstats.num_entries != nitems(vmstats.statbuf))
1116 break;
1117 }
1118 if (have_stats) {
1119 if (ret_entries)
1120 *ret_entries = count;
1121 if (ret_tv)
1122 *ret_tv = vmstats.tv;
1123 return (stats_buf);
1124 } else
1125 return (NULL);
1126 }
1127
1128 const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
1130 {
1131 static struct vm_stat_desc statdesc;
1132
1133 statdesc.index = index;
1134 if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
1135 return (statdesc.desc);
1136 else
1137 return (NULL);
1138 }
1139
1140 int
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
1142 {
1143 int error;
1144 struct vm_x2apic x2apic;
1145
1146 bzero(&x2apic, sizeof(x2apic));
1147
1148 error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic);
1149 *state = x2apic.state;
1150 return (error);
1151 }
1152
1153 int
vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
1155 {
1156 int error;
1157 struct vm_x2apic x2apic;
1158
1159 bzero(&x2apic, sizeof(x2apic));
1160 x2apic.state = state;
1161
1162 error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic);
1163
1164 return (error);
1165 }
1166
1167 /*
1168 * From Intel Vol 3a:
1169 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
1170 */
1171 int
vcpu_reset(struct vcpu *vcpu)
1173 {
1174 int error;
1175 uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
1176 uint32_t desc_access, desc_limit;
1177 uint16_t sel;
1178
1179 zero = 0;
1180
1181 rflags = 0x2;
1182 error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
1183 if (error)
1184 goto done;
1185
1186 rip = 0xfff0;
1187 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0)
1188 goto done;
1189
	/*
	 * According to Intel's Software Developer's Manual, CR0 should be
	 * initialized with CR0_ET | CR0_NW | CR0_CD, but that crashes some
	 * guests like Windows.
	 */
1195 cr0 = CR0_NE;
1196 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
1197 goto done;
1198
1199 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR2, zero)) != 0)
1200 goto done;
1201
1202 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, zero)) != 0)
1203 goto done;
1204
1205 cr4 = 0;
1206 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
1207 goto done;
1208
1209 /*
1210 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
1211 */
1212 desc_base = 0xffff0000;
1213 desc_limit = 0xffff;
1214 desc_access = 0x0093;
1215 error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
1216 desc_base, desc_limit, desc_access);
1217 if (error)
1218 goto done;
1219
1220 sel = 0xf000;
1221 if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, sel)) != 0)
1222 goto done;
1223
1224 /*
1225 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
1226 */
1227 desc_base = 0;
1228 desc_limit = 0xffff;
1229 desc_access = 0x0093;
1230 error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
1231 desc_base, desc_limit, desc_access);
1232 if (error)
1233 goto done;
1234
1235 error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
1236 desc_base, desc_limit, desc_access);
1237 if (error)
1238 goto done;
1239
1240 error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
1241 desc_base, desc_limit, desc_access);
1242 if (error)
1243 goto done;
1244
1245 error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
1246 desc_base, desc_limit, desc_access);
1247 if (error)
1248 goto done;
1249
1250 error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
1251 desc_base, desc_limit, desc_access);
1252 if (error)
1253 goto done;
1254
1255 sel = 0;
1256 if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, sel)) != 0)
1257 goto done;
1258 if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, sel)) != 0)
1259 goto done;
1260 if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, sel)) != 0)
1261 goto done;
1262 if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, sel)) != 0)
1263 goto done;
1264 if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, sel)) != 0)
1265 goto done;
1266
1267 if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, zero)) != 0)
1268 goto done;
1269
1270 /* General purpose registers */
1271 rdx = 0xf00;
1272 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RAX, zero)) != 0)
1273 goto done;
1274 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBX, zero)) != 0)
1275 goto done;
1276 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RCX, zero)) != 0)
1277 goto done;
1278 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
1279 goto done;
1280 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSI, zero)) != 0)
1281 goto done;
1282 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDI, zero)) != 0)
1283 goto done;
1284 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBP, zero)) != 0)
1285 goto done;
1286 if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, zero)) != 0)
1287 goto done;
1288 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R8, zero)) != 0)
1289 goto done;
1290 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R9, zero)) != 0)
1291 goto done;
1292 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R10, zero)) != 0)
1293 goto done;
1294 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R11, zero)) != 0)
1295 goto done;
1296 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R12, zero)) != 0)
1297 goto done;
1298 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R13, zero)) != 0)
1299 goto done;
1300 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R14, zero)) != 0)
1301 goto done;
1302 if ((error = vm_set_register(vcpu, VM_REG_GUEST_R15, zero)) != 0)
1303 goto done;
1304
1305 /* GDTR, IDTR */
1306 desc_base = 0;
1307 desc_limit = 0xffff;
1308 desc_access = 0;
1309 error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
1310 desc_base, desc_limit, desc_access);
1311 if (error != 0)
1312 goto done;
1313
1314 error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
1315 desc_base, desc_limit, desc_access);
1316 if (error != 0)
1317 goto done;
1318
1319 /* TR */
1320 desc_base = 0;
1321 desc_limit = 0xffff;
1322 desc_access = 0x0000008b;
1323 error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
1324 if (error)
1325 goto done;
1326
1327 sel = 0;
1328 if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, sel)) != 0)
1329 goto done;
1330
1331 /* LDTR */
1332 desc_base = 0;
1333 desc_limit = 0xffff;
1334 desc_access = 0x00000082;
1335 error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, desc_base,
1336 desc_limit, desc_access);
1337 if (error)
1338 goto done;
1339
1340 sel = 0;
1341 if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
1342 goto done;
1343
1344 if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR6,
1345 0xffff0ff0)) != 0)
1346 goto done;
1347 if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR7, 0x400)) !=
1348 0)
1349 goto done;
1350
1351 if ((error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW,
1352 zero)) != 0)
1353 goto done;
1354
1355 error = 0;
1356 done:
1357 return (error);
1358 }
1359
1360 int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
1362 {
1363 int error, i;
1364 struct vm_gpa_pte gpapte;
1365
1366 bzero(&gpapte, sizeof(gpapte));
1367 gpapte.gpa = gpa;
1368
1369 error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);
1370
1371 if (error == 0) {
1372 *num = gpapte.ptenum;
1373 for (i = 0; i < gpapte.ptenum; i++)
1374 pte[i] = gpapte.pte[i];
1375 }
1376
1377 return (error);
1378 }
1379
1380 int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
1382 {
1383 int error;
1384 struct vm_hpet_cap cap;
1385
1386 bzero(&cap, sizeof(struct vm_hpet_cap));
1387 error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
1388 if (capabilities != NULL)
1389 *capabilities = cap.capabilities;
1390 return (error);
1391 }
1392
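/*
 * Translate a guest linear address to a guest physical address using the
 * supplied paging state. A translation fault is reported via '*fault' rather
 * than as an error.
 */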
1393 int
vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
1396 {
1397 struct vm_gla2gpa gg;
1398 int error;
1399
1400 bzero(&gg, sizeof(struct vm_gla2gpa));
1401 gg.prot = prot;
1402 gg.gla = gla;
1403 gg.paging = *paging;
1404
1405 error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
1406 if (error == 0) {
1407 *fault = gg.fault;
1408 *gpa = gg.gpa;
1409 }
1410 return (error);
1411 }
1412
1413 int
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
1416 {
1417 struct vm_gla2gpa gg;
1418 int error;
1419
1420 bzero(&gg, sizeof(struct vm_gla2gpa));
1421 gg.prot = prot;
1422 gg.gla = gla;
1423 gg.paging = *paging;
1424
1425 error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
1426 if (error == 0) {
1427 *fault = gg.fault;
1428 *gpa = gg.gpa;
1429 }
1430 return (error);
1431 }
1432
1433 #ifndef min
1434 #define min(a,b) (((a) < (b)) ? (a) : (b))
1435 #endif
1436
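/*
 * Translate the guest linear address range [gla, gla + len) into host iovecs,
 * one entry per guest page. A translation fault is reported via '*fault'.
 */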
1437 int
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
1441 {
1442 void *va;
1443 uint64_t gpa, off;
1444 int error, i, n;
1445
1446 for (i = 0; i < iovcnt; i++) {
1447 iov[i].iov_base = 0;
1448 iov[i].iov_len = 0;
1449 }
1450
1451 while (len) {
1452 assert(iovcnt > 0);
1453 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
1454 if (error || *fault)
1455 return (error);
1456
1457 off = gpa & PAGE_MASK;
1458 n = MIN(len, PAGE_SIZE - off);
1459
1460 va = vm_map_gpa(vcpu->ctx, gpa, n);
1461 if (va == NULL)
1462 return (EFAULT);
1463
1464 iov->iov_base = va;
1465 iov->iov_len = n;
1466 iov++;
1467 iovcnt--;
1468
1469 gla += n;
1470 len -= n;
1471 }
1472 return (0);
1473 }
1474
1475 void
vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused)
1477 {
1478 /*
1479 * Intentionally empty. This is used by the instruction
1480 * emulation code shared with the kernel. The in-kernel
1481 * version of this is non-empty.
1482 */
1483 }
1484
1485 void
vm_copyin(struct iovec *iov, void *vp, size_t len)
1487 {
1488 const char *src;
1489 char *dst;
1490 size_t n;
1491
1492 dst = vp;
1493 while (len) {
1494 assert(iov->iov_len);
1495 n = min(len, iov->iov_len);
1496 src = iov->iov_base;
1497 bcopy(src, dst, n);
1498
1499 iov++;
1500 dst += n;
1501 len -= n;
1502 }
1503 }
1504
1505 void
vm_copyout(const void *vp, struct iovec *iov, size_t len)
1507 {
1508 const char *src;
1509 char *dst;
1510 size_t n;
1511
1512 src = vp;
1513 while (len) {
1514 assert(iov->iov_len);
1515 n = min(len, iov->iov_len);
1516 dst = iov->iov_base;
1517 bcopy(src, dst, n);
1518
1519 iov++;
1520 src += n;
1521 len -= n;
1522 }
1523 }
1524
1525 static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
1527 {
1528 struct vm_cpuset vm_cpuset;
1529 int error;
1530
1531 bzero(&vm_cpuset, sizeof(struct vm_cpuset));
1532 vm_cpuset.which = which;
1533 vm_cpuset.cpusetsize = sizeof(cpuset_t);
1534 vm_cpuset.cpus = cpus;
1535
1536 error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
1537 return (error);
1538 }
1539
1540 int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
1542 {
1543
1544 return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
1545 }
1546
1547 int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
1549 {
1550
1551 return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
1552 }
1553
1554 int
vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
1556 {
1557
1558 return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
1559 }
1560
1561 int
vm_activate_cpu(struct vcpu *vcpu)
1563 {
1564 struct vm_activate_cpu ac;
1565 int error;
1566
1567 bzero(&ac, sizeof(struct vm_activate_cpu));
1568 error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
1569 return (error);
1570 }
1571
1572 int
vm_suspend_all_cpus(struct vmctx *ctx)
1574 {
1575 struct vm_activate_cpu ac;
1576 int error;
1577
1578 bzero(&ac, sizeof(struct vm_activate_cpu));
1579 ac.vcpuid = -1;
1580 error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
1581 return (error);
1582 }
1583
1584 int
vm_suspend_cpu(struct vcpu *vcpu)
1586 {
1587 struct vm_activate_cpu ac;
1588 int error;
1589
1590 bzero(&ac, sizeof(struct vm_activate_cpu));
1591 error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
1592 return (error);
1593 }
1594
1595 int
vm_resume_cpu(struct vcpu *vcpu)
1597 {
1598 struct vm_activate_cpu ac;
1599 int error;
1600
1601 bzero(&ac, sizeof(struct vm_activate_cpu));
1602 error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
1603 return (error);
1604 }
1605
1606 int
vm_resume_all_cpus(struct vmctx *ctx)
1608 {
1609 struct vm_activate_cpu ac;
1610 int error;
1611
1612 bzero(&ac, sizeof(struct vm_activate_cpu));
1613 ac.vcpuid = -1;
1614 error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
1615 return (error);
1616 }
1617
1618 int
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
1620 {
1621 struct vm_intinfo vmii;
1622 int error;
1623
1624 bzero(&vmii, sizeof(struct vm_intinfo));
1625 error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
1626 if (error == 0) {
1627 *info1 = vmii.info1;
1628 *info2 = vmii.info2;
1629 }
1630 return (error);
1631 }
1632
1633 int
vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
1635 {
1636 struct vm_intinfo vmii;
1637 int error;
1638
1639 bzero(&vmii, sizeof(struct vm_intinfo));
1640 vmii.info1 = info1;
1641 error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
1642 return (error);
1643 }
1644
1645 int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
1647 {
1648 struct vm_rtc_data rtcdata;
1649 int error;
1650
1651 bzero(&rtcdata, sizeof(struct vm_rtc_data));
1652 rtcdata.offset = offset;
1653 rtcdata.value = value;
1654 error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
1655 return (error);
1656 }
1657
1658 int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
1660 {
1661 struct vm_rtc_data rtcdata;
1662 int error;
1663
1664 bzero(&rtcdata, sizeof(struct vm_rtc_data));
1665 rtcdata.offset = offset;
1666 error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
1667 if (error == 0)
1668 *retval = rtcdata.value;
1669 return (error);
1670 }
1671
1672 int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
1674 {
1675 struct vm_rtc_time rtctime;
1676 int error;
1677
1678 bzero(&rtctime, sizeof(struct vm_rtc_time));
1679 rtctime.secs = secs;
1680 error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
1681 return (error);
1682 }
1683
1684 int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
1686 {
1687 struct vm_rtc_time rtctime;
1688 int error;
1689
1690 bzero(&rtctime, sizeof(struct vm_rtc_time));
1691 error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
1692 if (error == 0)
1693 *secs = rtctime.secs;
1694 return (error);
1695 }
1696
1697 int
vm_restart_instruction(struct vcpu *vcpu)
1699 {
1700 int arg;
1701
1702 return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg));
1703 }
1704
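/* Forward a device snapshot request described by 'meta' to the kernel. */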
1705 int
vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta)
1707 {
1708
1709 if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) {
1710 #ifdef SNAPSHOT_DEBUG
1711 fprintf(stderr, "%s: snapshot failed for %s: %d\r\n",
1712 __func__, meta->dev_name, errno);
1713 #endif
1714 return (-1);
1715 }
1716 return (0);
1717 }
1718
1719 int
vm_restore_time(struct vmctx *ctx)
1721 {
1722 int dummy;
1723
1724 dummy = 0;
1725 return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy));
1726 }
1727
1728 int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
1731 {
1732 struct vm_cpu_topology topology;
1733
1734 bzero(&topology, sizeof (struct vm_cpu_topology));
1735 topology.sockets = sockets;
1736 topology.cores = cores;
1737 topology.threads = threads;
1738 topology.maxcpus = maxcpus;
1739 return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
1740 }
1741
1742 int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
1745 {
1746 struct vm_cpu_topology topology;
1747 int error;
1748
1749 bzero(&topology, sizeof (struct vm_cpu_topology));
1750 error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
1751 if (error == 0) {
1752 *sockets = topology.sockets;
1753 *cores = topology.cores;
1754 *threads = topology.threads;
1755 *maxcpus = topology.maxcpus;
1756 }
1757 return (error);
1758 }
1759
1760 /* Keep in sync with machine/vmm_dev.h. */
1761 static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
1762 VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG,
1763 VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
1764 VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
1765 VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
1766 VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
1767 VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
1768 VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
1769 VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
1770 VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
1771 VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
1772 VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
1773 VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
1774 VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
1775 VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
1776 VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
1777 VM_GLA2GPA_NOFAULT,
1778 VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
1779 VM_SET_INTINFO, VM_GET_INTINFO,
1780 VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
1781 VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY,
1782 VM_SNAPSHOT_REQ, VM_RESTORE_TIME
1783 };
1784
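/*
 * Limit the VM device descriptor to the Capsicum rights and ioctl commands
 * that libvmmapi needs.
 */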
1785 int
vm_limit_rights(struct vmctx *ctx)
1787 {
1788 cap_rights_t rights;
1789 size_t ncmds;
1790
1791 cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
1792 if (caph_rights_limit(ctx->fd, &rights) != 0)
1793 return (-1);
1794 ncmds = nitems(vm_ioctl_cmds);
1795 if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, ncmds) != 0)
1796 return (-1);
1797 return (0);
1798 }
1799
1800 /*
1801 * Avoid using in new code. Operations on the fd should be wrapped here so that
1802 * capability rights can be kept in sync.
1803 */
1804 int
vm_get_device_fd(struct vmctx *ctx)
1806 {
1807
1808 return (ctx->fd);
1809 }
1810
1811 /* Legacy interface, do not use. */
1812 const cap_ioctl_t *
vm_get_ioctls(size_t *len)
1814 {
1815 cap_ioctl_t *cmds;
1816
1817 if (len == NULL) {
1818 cmds = malloc(sizeof(vm_ioctl_cmds));
1819 if (cmds == NULL)
1820 return (NULL);
1821 bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
1822 return (cmds);
1823 }
1824
1825 *len = nitems(vm_ioctl_cmds);
1826 return (NULL);
1827 }
1828