1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2011 NetApp, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include <sys/param.h>
35 #include <sys/sysctl.h>
36 #include <sys/ioctl.h>
37 #include <sys/linker.h>
38 #include <sys/mman.h>
39 #include <sys/module.h>
40 #include <sys/_iovec.h>
41 #include <sys/cpuset.h>
42
43 #include <x86/segments.h>
44 #include <machine/specialreg.h>
45
46 #include <errno.h>
47 #include <stdbool.h>
48 #include <stdio.h>
49 #include <stdlib.h>
50 #include <assert.h>
51 #include <string.h>
52 #include <fcntl.h>
53 #include <unistd.h>
54
55 #include <libutil.h>
56
57 #include <vm/vm.h>
58 #include <machine/vmm.h>
59 #include <machine/vmm_dev.h>
60 #include <machine/vmm_snapshot.h>
61
62 #include "vmmapi.h"
63
64 #define MB (1024 * 1024UL)
65 #define GB (1024 * 1024 * 1024UL)
66
67 /*
68 * Size of the guard region before and after the virtual address space
69 * mapping the guest physical memory. This must be a multiple of the
70 * superpage size for performance reasons.
71 */
72 #define VM_MMAP_GUARD_SIZE (4 * MB)
73
74 #define PROT_RW (PROT_READ | PROT_WRITE)
75 #define PROT_ALL (PROT_READ | PROT_WRITE | PROT_EXEC)
76
77 struct vmctx {
78 int fd;
79 uint32_t lowmem_limit;
80 int memflags;
81 size_t lowmem;
82 size_t highmem;
83 char *baseaddr;
84 char *name;
85 };
86
87 #define CREATE(x) sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
88 #define DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
89
90 static int
91 vm_device_open(const char *name)
92 {
93 int fd, len;
94 char *vmfile;
95
96 len = strlen("/dev/vmm/") + strlen(name) + 1;
97 vmfile = malloc(len);
98 assert(vmfile != NULL);
99 snprintf(vmfile, len, "/dev/vmm/%s", name);
100
101 /* Open the device file */
102 fd = open(vmfile, O_RDWR, 0);
103
104 free(vmfile);
105 return (fd);
106 }
107
108 int
109 vm_create(const char *name)
110 {
111 /* Try to load vmm(4) module before creating a guest. */
112 if (modfind("vmm") < 0)
113 kldload("vmm");
114 return (CREATE(name));
115 }
116
117 struct vmctx *
118 vm_open(const char *name)
119 {
120 struct vmctx *vm;
121 int saved_errno;
122
123 vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
124 assert(vm != NULL);
125
126 vm->fd = -1;
127 vm->memflags = 0;
128 vm->lowmem_limit = 3 * GB;
129 vm->name = (char *)(vm + 1);
130 strcpy(vm->name, name);
131
132 if ((vm->fd = vm_device_open(vm->name)) < 0)
133 goto err;
134
135 return (vm);
136 err:
137 saved_errno = errno;
138 free(vm);
139 errno = saved_errno;
140 return (NULL);
141 }
142
143 void
144 vm_destroy(struct vmctx *vm)
145 {
146 assert(vm != NULL);
147
148 if (vm->fd >= 0)
149 close(vm->fd);
150 DESTROY(vm->name);
151
152 free(vm);
153 }
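/*
 * Usage sketch (illustrative, not part of the library): the create/open/
 * destroy lifecycle a caller such as bhyve(8) typically follows.  The VM
 * name "guest0" and the use of err(3) are placeholders for this example.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("guest0") != 0 && errno != EEXIST)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("guest0")) == NULL)
 *		err(1, "vm_open");
 *
 *	... configure memory and vcpus, run the guest ...
 *
 *	vm_destroy(ctx);
 */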
154
155 int
156 vm_parse_memsize(const char *opt, size_t *ret_memsize)
157 {
158 char *endptr;
159 size_t optval;
160 int error;
161
162 optval = strtoul(opt, &endptr, 0);
163 if (*opt != '\0' && *endptr == '\0') {
164 /*
165 * For the sake of backward compatibility, if the memory size
166 * specified on the command line is less than a megabyte then
167 * it is interpreted as being in units of MB.
168 */
169 if (optval < MB)
170 optval *= MB;
171 *ret_memsize = optval;
172 error = 0;
173 } else
174 error = expand_number(opt, ret_memsize);
175
176 return (error);
177 }
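/*
 * Example (illustrative): because of the compatibility rule above, a bare
 * number below one megabyte is scaled, while suffixed sizes go through
 * expand_number(3):
 *
 *	size_t memsize;
 *
 *	vm_parse_memsize("256", &memsize);	-> memsize == 256 * MB
 *	vm_parse_memsize("2G", &memsize);	-> memsize == 2 * GB
 */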
178
179 uint32_t
180 vm_get_lowmem_limit(struct vmctx *ctx)
181 {
182
183 return (ctx->lowmem_limit);
184 }
185
186 void
187 vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
188 {
189
190 ctx->lowmem_limit = limit;
191 }
192
193 void
194 vm_set_memflags(struct vmctx *ctx, int flags)
195 {
196
197 ctx->memflags = flags;
198 }
199
200 int
201 vm_get_memflags(struct vmctx *ctx)
202 {
203
204 return (ctx->memflags);
205 }
206
207 /*
208 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
209 */
210 int
211 vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
212 size_t len, int prot)
213 {
214 struct vm_memmap memmap;
215 int error, flags;
216
217 memmap.gpa = gpa;
218 memmap.segid = segid;
219 memmap.segoff = off;
220 memmap.len = len;
221 memmap.prot = prot;
222 memmap.flags = 0;
223
224 if (ctx->memflags & VM_MEM_F_WIRED)
225 memmap.flags |= VM_MEMMAP_F_WIRED;
226
227 /*
228 * If this mapping already exists then don't create it again. This
229 * is the common case for SYSMEM mappings created by bhyveload(8).
230 */
231 error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
232 if (error == 0 && gpa == memmap.gpa) {
233 if (segid != memmap.segid || off != memmap.segoff ||
234 prot != memmap.prot || flags != memmap.flags) {
235 errno = EEXIST;
236 return (-1);
237 } else {
238 return (0);
239 }
240 }
241
242 error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
243 return (error);
244 }
245
246 int
247 vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
248 size_t *lowmem_size, size_t *highmem_size)
249 {
250
251 *guest_baseaddr = ctx->baseaddr;
252 *lowmem_size = ctx->lowmem;
253 *highmem_size = ctx->highmem;
254 return (0);
255 }
256
257 int
258 vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
259 {
260 struct vm_munmap munmap;
261 int error;
262
263 munmap.gpa = gpa;
264 munmap.len = len;
265
266 error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
267 return (error);
268 }
269
270 int
271 vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
272 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
273 {
274 struct vm_memmap memmap;
275 int error;
276
277 bzero(&memmap, sizeof(struct vm_memmap));
278 memmap.gpa = *gpa;
279 error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
280 if (error == 0) {
281 *gpa = memmap.gpa;
282 *segid = memmap.segid;
283 *segoff = memmap.segoff;
284 *len = memmap.len;
285 *prot = memmap.prot;
286 *flags = memmap.flags;
287 }
288 return (error);
289 }
290
291 /*
292 * Return 0 if the segments are identical and non-zero otherwise.
293 *
294 * This is slightly complicated by the fact that only device memory segments
295 * are named.
296 */
297 static int
298 cmpseg(size_t len, const char *str, size_t len2, const char *str2)
299 {
300
301 if (len == len2) {
302 if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
303 return (0);
304 }
305 return (-1);
306 }
307
308 static int
309 vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
310 {
311 struct vm_memseg memseg;
312 size_t n;
313 int error;
314
315 /*
316 * If the memory segment has already been created then just return.
317 * This is the usual case for the SYSMEM segment created by userspace
318 * loaders like bhyveload(8).
319 */
320 error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
321 sizeof(memseg.name));
322 if (error)
323 return (error);
324
325 if (memseg.len != 0) {
326 if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
327 errno = EINVAL;
328 return (-1);
329 } else {
330 return (0);
331 }
332 }
333
334 bzero(&memseg, sizeof(struct vm_memseg));
335 memseg.segid = segid;
336 memseg.len = len;
337 if (name != NULL) {
338 n = strlcpy(memseg.name, name, sizeof(memseg.name));
339 if (n >= sizeof(memseg.name)) {
340 errno = ENAMETOOLONG;
341 return (-1);
342 }
343 }
344
345 error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
346 return (error);
347 }
348
349 int
350 vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
351 size_t bufsize)
352 {
353 struct vm_memseg memseg;
354 size_t n;
355 int error;
356
357 memseg.segid = segid;
358 error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
359 if (error == 0) {
360 *lenp = memseg.len;
361 n = strlcpy(namebuf, memseg.name, bufsize);
362 if (n >= bufsize) {
363 errno = ENAMETOOLONG;
364 error = -1;
365 }
366 }
367 return (error);
368 }
369
370 static int
371 setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
372 {
373 char *ptr;
374 int error, flags;
375
376 /* Map 'len' bytes starting at 'gpa' in the guest address space */
377 error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
378 if (error)
379 return (error);
380
381 flags = MAP_SHARED | MAP_FIXED;
382 if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
383 flags |= MAP_NOCORE;
384
385 /* mmap into the process address space on the host */
386 ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
387 if (ptr == MAP_FAILED)
388 return (-1);
389
390 return (0);
391 }
392
393 int
394 vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
395 {
396 size_t objsize, len;
397 vm_paddr_t gpa;
398 char *baseaddr, *ptr;
399 int error;
400
401 assert(vms == VM_MMAP_ALL);
402
403 /*
404 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
405 * create another 'highmem' segment above 4GB for the remainder.
406 */
407 if (memsize > ctx->lowmem_limit) {
408 ctx->lowmem = ctx->lowmem_limit;
409 ctx->highmem = memsize - ctx->lowmem_limit;
410 objsize = 4*GB + ctx->highmem;
411 } else {
412 ctx->lowmem = memsize;
413 ctx->highmem = 0;
414 objsize = ctx->lowmem;
415 }
416
417 error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
418 if (error)
419 return (error);
420
421 /*
422 * Stake out a contiguous region covering the guest physical memory
423 * and the adjoining guard regions.
424 */
425 len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
426 ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
427 if (ptr == MAP_FAILED)
428 return (-1);
429
430 baseaddr = ptr + VM_MMAP_GUARD_SIZE;
431 if (ctx->highmem > 0) {
432 gpa = 4*GB;
433 len = ctx->highmem;
434 error = setup_memory_segment(ctx, gpa, len, baseaddr);
435 if (error)
436 return (error);
437 }
438
439 if (ctx->lowmem > 0) {
440 gpa = 0;
441 len = ctx->lowmem;
442 error = setup_memory_segment(ctx, gpa, len, baseaddr);
443 if (error)
444 return (error);
445 }
446
447 ctx->baseaddr = baseaddr;
448
449 return (0);
450 }
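/*
 * Example (illustrative sketch): with the default 3 GB lowmem limit, a 6 GB
 * guest ends up split into 3 GB of lowmem at gpa 0 and 3 GB of highmem
 * starting at 4 GB:
 *
 *	if (vm_setup_memory(ctx, 6 * GB, VM_MMAP_ALL) != 0)
 *		err(1, "vm_setup_memory");
 *	assert(vm_get_lowmem_size(ctx) == 3 * GB);
 *	assert(vm_get_highmem_size(ctx) == 3 * GB);
 */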
451
452 /*
453 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
454 * the lowmem or highmem regions.
455 *
456 * In particular, return NULL if [gaddr, gaddr+len) falls in the guest MMIO region.
457 * The instruction emulation code depends on this behavior.
458 */
459 void *
460 vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
461 {
462
463 if (ctx->lowmem > 0) {
464 if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
465 gaddr + len <= ctx->lowmem)
466 return (ctx->baseaddr + gaddr);
467 }
468
469 if (ctx->highmem > 0) {
470 if (gaddr >= 4*GB) {
471 if (gaddr < 4*GB + ctx->highmem &&
472 len <= ctx->highmem &&
473 gaddr + len <= 4*GB + ctx->highmem)
474 return (ctx->baseaddr + gaddr);
475 }
476 }
477
478 return (NULL);
479 }
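/*
 * Example (illustrative, assuming the 6 GB layout sketched above): a gpa
 * inside guest RAM maps to a host pointer, while a gpa in the hole between
 * the lowmem limit and 4 GB does not:
 *
 *	vm_map_gpa(ctx, 0x1000, PAGE_SIZE);		-> non-NULL (lowmem)
 *	vm_map_gpa(ctx, 3 * GB + 0x1000, PAGE_SIZE);	-> NULL (MMIO hole)
 */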
480
481 vm_paddr_t
482 vm_rev_map_gpa(struct vmctx *ctx, void *addr)
483 {
484 vm_paddr_t offaddr;
485
486 offaddr = (char *)addr - ctx->baseaddr;
487
488 if (ctx->lowmem > 0)
489 if (offaddr >= 0 && offaddr <= ctx->lowmem)
490 return (offaddr);
491
492 if (ctx->highmem > 0)
493 if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
494 return (offaddr);
495
496 return ((vm_paddr_t)-1);
497 }
498
499 /* TODO: maximum size for vmname */
500 int
501 vm_get_name(struct vmctx *ctx, char *buf, size_t max_len)
502 {
503
504 if (strlcpy(buf, ctx->name, max_len) >= max_len)
505 return (EINVAL);
506 return (0);
507 }
508
509 size_t
510 vm_get_lowmem_size(struct vmctx *ctx)
511 {
512
513 return (ctx->lowmem);
514 }
515
516 size_t
517 vm_get_highmem_size(struct vmctx *ctx)
518 {
519
520 return (ctx->highmem);
521 }
522
523 void *
524 vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
525 {
526 char pathname[MAXPATHLEN];
527 size_t len2;
528 char *base, *ptr;
529 int fd, error, flags;
530
531 fd = -1;
532 ptr = MAP_FAILED;
533 if (name == NULL || strlen(name) == 0) {
534 errno = EINVAL;
535 goto done;
536 }
537
538 error = vm_alloc_memseg(ctx, segid, len, name);
539 if (error)
540 goto done;
541
542 strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
543 strlcat(pathname, ctx->name, sizeof(pathname));
544 strlcat(pathname, ".", sizeof(pathname));
545 strlcat(pathname, name, sizeof(pathname));
546
547 fd = open(pathname, O_RDWR);
548 if (fd < 0)
549 goto done;
550
551 /*
552 * Stake out a contiguous region covering the device memory and the
553 * adjoining guard regions.
554 */
555 len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
556 base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
557 0);
558 if (base == MAP_FAILED)
559 goto done;
560
561 flags = MAP_SHARED | MAP_FIXED;
562 if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
563 flags |= MAP_NOCORE;
564
565 /* mmap the devmem region in the host address space */
566 ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
567 done:
568 if (fd >= 0)
569 close(fd);
570 return (ptr);
571 }
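/*
 * Example (illustrative sketch): how a device model might back a 16 MB
 * framebuffer with a named devmem segment and then expose it to the guest.
 * The segment id VM_FRAMEBUFFER comes from <machine/vmm.h>; 'fb_gpa' is a
 * placeholder for whatever guest address the caller chooses.
 *
 *	void *fb;
 *
 *	fb = vm_create_devmem(ctx, VM_FRAMEBUFFER, "framebuffer", 16 * MB);
 *	if (fb == MAP_FAILED)
 *		err(1, "vm_create_devmem");
 *	if (vm_mmap_memseg(ctx, fb_gpa, VM_FRAMEBUFFER, 0, 16 * MB,
 *	    PROT_READ | PROT_WRITE) != 0)
 *		err(1, "vm_mmap_memseg");
 */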
572
573 int
574 vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
575 uint64_t base, uint32_t limit, uint32_t access)
576 {
577 int error;
578 struct vm_seg_desc vmsegdesc;
579
580 bzero(&vmsegdesc, sizeof(vmsegdesc));
581 vmsegdesc.cpuid = vcpu;
582 vmsegdesc.regnum = reg;
583 vmsegdesc.desc.base = base;
584 vmsegdesc.desc.limit = limit;
585 vmsegdesc.desc.access = access;
586
587 error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
588 return (error);
589 }
590
591 int
592 vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
593 uint64_t *base, uint32_t *limit, uint32_t *access)
594 {
595 int error;
596 struct vm_seg_desc vmsegdesc;
597
598 bzero(&vmsegdesc, sizeof(vmsegdesc));
599 vmsegdesc.cpuid = vcpu;
600 vmsegdesc.regnum = reg;
601
602 error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
603 if (error == 0) {
604 *base = vmsegdesc.desc.base;
605 *limit = vmsegdesc.desc.limit;
606 *access = vmsegdesc.desc.access;
607 }
608 return (error);
609 }
610
611 int
612 vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
613 {
614 int error;
615
616 error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
617 &seg_desc->access);
618 return (error);
619 }
620
621 int
622 vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
623 {
624 int error;
625 struct vm_register vmreg;
626
627 bzero(&vmreg, sizeof(vmreg));
628 vmreg.cpuid = vcpu;
629 vmreg.regnum = reg;
630 vmreg.regval = val;
631
632 error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
633 return (error);
634 }
635
636 int
637 vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
638 {
639 int error;
640 struct vm_register vmreg;
641
642 bzero(&vmreg, sizeof(vmreg));
643 vmreg.cpuid = vcpu;
644 vmreg.regnum = reg;
645
646 error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
647 *ret_val = vmreg.regval;
648 return (error);
649 }
650
651 int
652 vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
653 const int *regnums, uint64_t *regvals)
654 {
655 int error;
656 struct vm_register_set vmregset;
657
658 bzero(&vmregset, sizeof(vmregset));
659 vmregset.cpuid = vcpu;
660 vmregset.count = count;
661 vmregset.regnums = regnums;
662 vmregset.regvals = regvals;
663
664 error = ioctl(ctx->fd, VM_SET_REGISTER_SET, &vmregset);
665 return (error);
666 }
667
668 int
669 vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
670 const int *regnums, uint64_t *regvals)
671 {
672 int error;
673 struct vm_register_set vmregset;
674
675 bzero(&vmregset, sizeof(vmregset));
676 vmregset.cpuid = vcpu;
677 vmregset.count = count;
678 vmregset.regnums = regnums;
679 vmregset.regvals = regvals;
680
681 error = ioctl(ctx->fd, VM_GET_REGISTER_SET, &vmregset);
682 return (error);
683 }
684
685 int
686 vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
687 {
688 int error;
689 struct vm_run vmrun;
690
691 bzero(&vmrun, sizeof(vmrun));
692 vmrun.cpuid = vcpu;
693
694 error = ioctl(ctx->fd, VM_RUN, &vmrun);
695 bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
696 return (error);
697 }
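/*
 * Example (illustrative sketch of a caller's vcpu loop, not part of the
 * library): run the vcpu and dispatch on the exit reason.
 *
 *	struct vm_exit vmexit;
 *
 *	for (;;) {
 *		if (vm_run(ctx, vcpu, &vmexit) != 0)
 *			err(1, "vm_run");
 *		switch (vmexit.exitcode) {
 *		case VM_EXITCODE_INOUT:
 *			... emulate the port I/O access ...
 *			break;
 *		default:
 *			... handle or abort ...
 *		}
 *	}
 */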
698
699 int
700 vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
701 {
702 struct vm_suspend vmsuspend;
703
704 bzero(&vmsuspend, sizeof(vmsuspend));
705 vmsuspend.how = how;
706 return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
707 }
708
709 int
710 vm_reinit(struct vmctx *ctx)
711 {
712
713 return (ioctl(ctx->fd, VM_REINIT, 0));
714 }
715
716 int
717 vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
718 uint32_t errcode, int restart_instruction)
719 {
720 struct vm_exception exc;
721
722 exc.cpuid = vcpu;
723 exc.vector = vector;
724 exc.error_code = errcode;
725 exc.error_code_valid = errcode_valid;
726 exc.restart_instruction = restart_instruction;
727
728 return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
729 }
730
731 int
732 vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid)
733 {
734 /*
735 * The apic id associated with the 'vcpu' has the same numerical value
736 * as the 'vcpu' itself.
737 */
738 return (apicid);
739 }
740
741 int
742 vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
743 {
744 struct vm_lapic_irq vmirq;
745
746 bzero(&vmirq, sizeof(vmirq));
747 vmirq.cpuid = vcpu;
748 vmirq.vector = vector;
749
750 return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
751 }
752
753 int
754 vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
755 {
756 struct vm_lapic_irq vmirq;
757
758 bzero(&vmirq, sizeof(vmirq));
759 vmirq.cpuid = vcpu;
760 vmirq.vector = vector;
761
762 return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
763 }
764
765 int
766 vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
767 {
768 struct vm_lapic_msi vmmsi;
769
770 bzero(&vmmsi, sizeof(vmmsi));
771 vmmsi.addr = addr;
772 vmmsi.msg = msg;
773
774 return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
775 }
776
777 int
778 vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
779 {
780 struct vm_ioapic_irq ioapic_irq;
781
782 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
783 ioapic_irq.irq = irq;
784
785 return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
786 }
787
788 int
789 vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
790 {
791 struct vm_ioapic_irq ioapic_irq;
792
793 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
794 ioapic_irq.irq = irq;
795
796 return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
797 }
798
799 int
800 vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
801 {
802 struct vm_ioapic_irq ioapic_irq;
803
804 bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
805 ioapic_irq.irq = irq;
806
807 return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
808 }
809
810 int
811 vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
812 {
813
814 return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
815 }
816
817 int
818 vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu, vm_paddr_t gpa,
819 bool write, int size, uint64_t *value)
820 {
821 struct vm_readwrite_kernemu_device irp = {
822 .vcpuid = vcpu,
823 .access_width = fls(size) - 1,
824 .gpa = gpa,
825 .value = write ? *value : ~0ul,
826 };
827 long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
828 int rc;
829
830 rc = ioctl(ctx->fd, cmd, &irp);
831 if (rc == 0 && !write)
832 *value = irp.value;
833 return (rc);
834 }
835
836 int
837 vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
838 {
839 struct vm_isa_irq isa_irq;
840
841 bzero(&isa_irq, sizeof(struct vm_isa_irq));
842 isa_irq.atpic_irq = atpic_irq;
843 isa_irq.ioapic_irq = ioapic_irq;
844
845 return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
846 }
847
848 int
849 vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
850 {
851 struct vm_isa_irq isa_irq;
852
853 bzero(&isa_irq, sizeof(struct vm_isa_irq));
854 isa_irq.atpic_irq = atpic_irq;
855 isa_irq.ioapic_irq = ioapic_irq;
856
857 return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
858 }
859
860 int
861 vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
862 {
863 struct vm_isa_irq isa_irq;
864
865 bzero(&isa_irq, sizeof(struct vm_isa_irq));
866 isa_irq.atpic_irq = atpic_irq;
867 isa_irq.ioapic_irq = ioapic_irq;
868
869 return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
870 }
871
872 int
873 vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
874 enum vm_intr_trigger trigger)
875 {
876 struct vm_isa_irq_trigger isa_irq_trigger;
877
878 bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
879 isa_irq_trigger.atpic_irq = atpic_irq;
880 isa_irq_trigger.trigger = trigger;
881
882 return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
883 }
884
885 int
886 vm_inject_nmi(struct vmctx *ctx, int vcpu)
887 {
888 struct vm_nmi vmnmi;
889
890 bzero(&vmnmi, sizeof(vmnmi));
891 vmnmi.cpuid = vcpu;
892
893 return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
894 }
895
896 static const char *capstrmap[] = {
897 [VM_CAP_HALT_EXIT] = "hlt_exit",
898 [VM_CAP_MTRAP_EXIT] = "mtrap_exit",
899 [VM_CAP_PAUSE_EXIT] = "pause_exit",
900 [VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
901 [VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
902 [VM_CAP_BPT_EXIT] = "bpt_exit",
903 };
904
905 int
906 vm_capability_name2type(const char *capname)
907 {
908 int i;
909
910 for (i = 0; i < (int)nitems(capstrmap); i++) {
911 if (strcmp(capstrmap[i], capname) == 0)
912 return (i);
913 }
914
915 return (-1);
916 }
917
918 const char *
919 vm_capability_type2name(int type)
920 {
921 if (type >= 0 && type < (int)nitems(capstrmap))
922 return (capstrmap[type]);
923
924 return (NULL);
925 }
926
927 int
928 vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
929 int *retval)
930 {
931 int error;
932 struct vm_capability vmcap;
933
934 bzero(&vmcap, sizeof(vmcap));
935 vmcap.cpuid = vcpu;
936 vmcap.captype = cap;
937
938 error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
939 *retval = vmcap.capval;
940 return (error);
941 }
942
943 int
944 vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
945 {
946 struct vm_capability vmcap;
947
948 bzero(&vmcap, sizeof(vmcap));
949 vmcap.cpuid = vcpu;
950 vmcap.captype = cap;
951 vmcap.capval = val;
952
953 return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
954 }
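/*
 * Example (illustrative): enabling a capability by its string name, the way
 * a bhyvectl(8)-style tool might drive this interface:
 *
 *	int cap;
 *
 *	cap = vm_capability_name2type("hlt_exit");
 *	if (cap < 0 || vm_set_capability(ctx, vcpu, cap, 1) != 0)
 *		err(1, "could not enable hlt_exit");
 */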
955
956 int
957 vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
958 {
959 struct vm_pptdev pptdev;
960
961 bzero(&pptdev, sizeof(pptdev));
962 pptdev.bus = bus;
963 pptdev.slot = slot;
964 pptdev.func = func;
965
966 return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
967 }
968
969 int
970 vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
971 {
972 struct vm_pptdev pptdev;
973
974 bzero(&pptdev, sizeof(pptdev));
975 pptdev.bus = bus;
976 pptdev.slot = slot;
977 pptdev.func = func;
978
979 return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
980 }
981
982 int
983 vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
984 vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
985 {
986 struct vm_pptdev_mmio pptmmio;
987
988 bzero(&pptmmio, sizeof(pptmmio));
989 pptmmio.bus = bus;
990 pptmmio.slot = slot;
991 pptmmio.func = func;
992 pptmmio.gpa = gpa;
993 pptmmio.len = len;
994 pptmmio.hpa = hpa;
995
996 return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
997 }
998
999 int
1000 vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
1001 vm_paddr_t gpa, size_t len)
1002 {
1003 struct vm_pptdev_mmio pptmmio;
1004
1005 bzero(&pptmmio, sizeof(pptmmio));
1006 pptmmio.bus = bus;
1007 pptmmio.slot = slot;
1008 pptmmio.func = func;
1009 pptmmio.gpa = gpa;
1010 pptmmio.len = len;
1011
1012 return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
1013 }
1014
1015 int
1016 vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
1017 uint64_t addr, uint64_t msg, int numvec)
1018 {
1019 struct vm_pptdev_msi pptmsi;
1020
1021 bzero(&pptmsi, sizeof(pptmsi));
1022 pptmsi.vcpu = vcpu;
1023 pptmsi.bus = bus;
1024 pptmsi.slot = slot;
1025 pptmsi.func = func;
1026 pptmsi.msg = msg;
1027 pptmsi.addr = addr;
1028 pptmsi.numvec = numvec;
1029
1030 return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
1031 }
1032
1033 int
1034 vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
1035 int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
1036 {
1037 struct vm_pptdev_msix pptmsix;
1038
1039 bzero(&pptmsix, sizeof(pptmsix));
1040 pptmsix.vcpu = vcpu;
1041 pptmsix.bus = bus;
1042 pptmsix.slot = slot;
1043 pptmsix.func = func;
1044 pptmsix.idx = idx;
1045 pptmsix.msg = msg;
1046 pptmsix.addr = addr;
1047 pptmsix.vector_control = vector_control;
1048
1049 return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
1050 }
1051
1052 int
1053 vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
1054 {
1055 struct vm_pptdev ppt;
1056
1057 bzero(&ppt, sizeof(ppt));
1058 ppt.bus = bus;
1059 ppt.slot = slot;
1060 ppt.func = func;
1061
1062 return ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt);
1063 }
1064
1065 uint64_t *
1066 vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
1067 int *ret_entries)
1068 {
1069 int error;
1070
1071 static struct vm_stats vmstats;
1072
1073 vmstats.cpuid = vcpu;
1074
1075 error = ioctl(ctx->fd, VM_STATS, &vmstats);
1076 if (error == 0) {
1077 if (ret_entries)
1078 *ret_entries = vmstats.num_entries;
1079 if (ret_tv)
1080 *ret_tv = vmstats.tv;
1081 return (vmstats.statbuf);
1082 } else
1083 return (NULL);
1084 }
1085
1086 const char *
1087 vm_get_stat_desc(struct vmctx *ctx, int index)
1088 {
1089 static struct vm_stat_desc statdesc;
1090
1091 statdesc.index = index;
1092 if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
1093 return (statdesc.desc);
1094 else
1095 return (NULL);
1096 }
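/*
 * Example (illustrative): dumping every statistic for a vcpu by pairing
 * vm_get_stats() with vm_get_stat_desc():
 *
 *	uint64_t *stats;
 *	struct timeval tv;
 *	int i, num;
 *
 *	stats = vm_get_stats(ctx, vcpu, &tv, &num);
 *	for (i = 0; stats != NULL && i < num; i++)
 *		printf("%s: %ju\n", vm_get_stat_desc(ctx, i),
 *		    (uintmax_t)stats[i]);
 */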
1097
1098 int
1099 vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
1100 {
1101 int error;
1102 struct vm_x2apic x2apic;
1103
1104 bzero(&x2apic, sizeof(x2apic));
1105 x2apic.cpuid = vcpu;
1106
1107 error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
1108 *state = x2apic.state;
1109 return (error);
1110 }
1111
1112 int
1113 vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
1114 {
1115 int error;
1116 struct vm_x2apic x2apic;
1117
1118 bzero(&x2apic, sizeof(x2apic));
1119 x2apic.cpuid = vcpu;
1120 x2apic.state = state;
1121
1122 error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);
1123
1124 return (error);
1125 }
1126
1127 /*
1128 * From Intel Vol 3a:
1129 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
1130 */
1131 int
1132 vcpu_reset(struct vmctx *vmctx, int vcpu)
1133 {
1134 int error;
1135 uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
1136 uint32_t desc_access, desc_limit;
1137 uint16_t sel;
1138
1139 zero = 0;
1140
1141 rflags = 0x2;
1142 error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
1143 if (error)
1144 goto done;
1145
1146 rip = 0xfff0;
1147 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
1148 goto done;
1149
1150 cr0 = CR0_NE;
1151 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
1152 goto done;
1153
1154 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
1155 goto done;
1156
1157 cr4 = 0;
1158 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
1159 goto done;
1160
1161 /*
1162 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
1163 */
1164 desc_base = 0xffff0000;
1165 desc_limit = 0xffff;
1166 desc_access = 0x0093;
1167 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
1168 desc_base, desc_limit, desc_access);
1169 if (error)
1170 goto done;
1171
1172 sel = 0xf000;
1173 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
1174 goto done;
1175
1176 /*
1177 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
1178 */
1179 desc_base = 0;
1180 desc_limit = 0xffff;
1181 desc_access = 0x0093;
1182 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
1183 desc_base, desc_limit, desc_access);
1184 if (error)
1185 goto done;
1186
1187 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
1188 desc_base, desc_limit, desc_access);
1189 if (error)
1190 goto done;
1191
1192 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
1193 desc_base, desc_limit, desc_access);
1194 if (error)
1195 goto done;
1196
1197 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
1198 desc_base, desc_limit, desc_access);
1199 if (error)
1200 goto done;
1201
1202 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
1203 desc_base, desc_limit, desc_access);
1204 if (error)
1205 goto done;
1206
1207 sel = 0;
1208 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
1209 goto done;
1210 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
1211 goto done;
1212 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
1213 goto done;
1214 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
1215 goto done;
1216 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
1217 goto done;
1218
1219 /* General purpose registers */
1220 rdx = 0xf00;
1221 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
1222 goto done;
1223 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
1224 goto done;
1225 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
1226 goto done;
1227 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
1228 goto done;
1229 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
1230 goto done;
1231 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
1232 goto done;
1233 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
1234 goto done;
1235 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
1236 goto done;
1237
1238 /* GDTR, IDTR */
1239 desc_base = 0;
1240 desc_limit = 0xffff;
1241 desc_access = 0;
1242 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
1243 desc_base, desc_limit, desc_access);
1244 if (error != 0)
1245 goto done;
1246
1247 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
1248 desc_base, desc_limit, desc_access);
1249 if (error != 0)
1250 goto done;
1251
1252 /* TR */
1253 desc_base = 0;
1254 desc_limit = 0xffff;
1255 desc_access = 0x0000008b;
1256 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
1257 if (error)
1258 goto done;
1259
1260 sel = 0;
1261 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
1262 goto done;
1263
1264 /* LDTR */
1265 desc_base = 0;
1266 desc_limit = 0xffff;
1267 desc_access = 0x00000082;
1268 error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
1269 desc_limit, desc_access);
1270 if (error)
1271 goto done;
1272
1273 sel = 0;
1274 if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
1275 goto done;
1276
1277 /* XXX cr2, debug registers */
1278
1279 error = 0;
1280 done:
1281 return (error);
1282 }
1283
1284 int
1285 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
1286 {
1287 int error, i;
1288 struct vm_gpa_pte gpapte;
1289
1290 bzero(&gpapte, sizeof(gpapte));
1291 gpapte.gpa = gpa;
1292
1293 error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);
1294
1295 if (error == 0) {
1296 *num = gpapte.ptenum;
1297 for (i = 0; i < gpapte.ptenum; i++)
1298 pte[i] = gpapte.pte[i];
1299 }
1300
1301 return (error);
1302 }
1303
1304 int
1305 vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
1306 {
1307 int error;
1308 struct vm_hpet_cap cap;
1309
1310 bzero(&cap, sizeof(struct vm_hpet_cap));
1311 error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
1312 if (capabilities != NULL)
1313 *capabilities = cap.capabilities;
1314 return (error);
1315 }
1316
1317 int
1318 vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1319 uint64_t gla, int prot, uint64_t *gpa, int *fault)
1320 {
1321 struct vm_gla2gpa gg;
1322 int error;
1323
1324 bzero(&gg, sizeof(struct vm_gla2gpa));
1325 gg.vcpuid = vcpu;
1326 gg.prot = prot;
1327 gg.gla = gla;
1328 gg.paging = *paging;
1329
1330 error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
1331 if (error == 0) {
1332 *fault = gg.fault;
1333 *gpa = gg.gpa;
1334 }
1335 return (error);
1336 }
1337
1338 int
1339 vm_gla2gpa_nofault(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1340 uint64_t gla, int prot, uint64_t *gpa, int *fault)
1341 {
1342 struct vm_gla2gpa gg;
1343 int error;
1344
1345 bzero(&gg, sizeof(struct vm_gla2gpa));
1346 gg.vcpuid = vcpu;
1347 gg.prot = prot;
1348 gg.gla = gla;
1349 gg.paging = *paging;
1350
1351 error = ioctl(ctx->fd, VM_GLA2GPA_NOFAULT, &gg);
1352 if (error == 0) {
1353 *fault = gg.fault;
1354 *gpa = gg.gpa;
1355 }
1356 return (error);
1357 }
1358
1359 #ifndef min
1360 #define min(a,b) (((a) < (b)) ? (a) : (b))
1361 #endif
1362
1363 int
1364 vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1365 uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
1366 int *fault)
1367 {
1368 void *va;
1369 uint64_t gpa, off;
1370 int error, i, n;
1371
1372 for (i = 0; i < iovcnt; i++) {
1373 iov[i].iov_base = 0;
1374 iov[i].iov_len = 0;
1375 }
1376
1377 while (len) {
1378 assert(iovcnt > 0);
1379 error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
1380 if (error || *fault)
1381 return (error);
1382
1383 off = gpa & PAGE_MASK;
1384 n = MIN(len, PAGE_SIZE - off);
1385
1386 va = vm_map_gpa(ctx, gpa, n);
1387 if (va == NULL)
1388 return (EFAULT);
1389
1390 iov->iov_base = va;
1391 iov->iov_len = n;
1392 iov++;
1393 iovcnt--;
1394
1395 gla += n;
1396 len -= n;
1397 }
1398 return (0);
1399 }
1400
1401 void
1402 vm_copy_teardown(struct vmctx *ctx __unused, int vcpu __unused,
1403 struct iovec *iov __unused, int iovcnt __unused)
1404 {
1405 }
1406
1407 void
1408 vm_copyin(struct vmctx *ctx __unused, int vcpu __unused, struct iovec *iov,
1409 void *vp, size_t len)
1410 {
1411 const char *src;
1412 char *dst;
1413 size_t n;
1414
1415 dst = vp;
1416 while (len) {
1417 assert(iov->iov_len);
1418 n = min(len, iov->iov_len);
1419 src = iov->iov_base;
1420 bcopy(src, dst, n);
1421
1422 iov++;
1423 dst += n;
1424 len -= n;
1425 }
1426 }
1427
1428 void
1429 vm_copyout(struct vmctx *ctx __unused, int vcpu __unused, const void *vp,
1430 struct iovec *iov, size_t len)
1431 {
1432 const char *src;
1433 char *dst;
1434 size_t n;
1435
1436 src = vp;
1437 while (len) {
1438 assert(iov->iov_len);
1439 n = min(len, iov->iov_len);
1440 dst = iov->iov_base;
1441 bcopy(src, dst, n);
1442
1443 iov++;
1444 src += n;
1445 len -= n;
1446 }
1447 }
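/*
 * Example (illustrative sketch): copying a structure out of the guest given
 * a guest linear address, roughly as instruction emulation does.  'paging',
 * 'gla' and 'buf' are placeholders that a real caller takes from the current
 * vm_exit and its own state.
 *
 *	struct iovec iov[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(ctx, vcpu, &paging, gla, sizeof(buf), PROT_READ,
 *	    iov, nitems(iov), &fault);
 *	if (error == 0 && !fault)
 *		vm_copyin(ctx, vcpu, iov, &buf, sizeof(buf));
 */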
1448
1449 static int
1450 vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
1451 {
1452 struct vm_cpuset vm_cpuset;
1453 int error;
1454
1455 bzero(&vm_cpuset, sizeof(struct vm_cpuset));
1456 vm_cpuset.which = which;
1457 vm_cpuset.cpusetsize = sizeof(cpuset_t);
1458 vm_cpuset.cpus = cpus;
1459
1460 error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
1461 return (error);
1462 }
1463
1464 int
1465 vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
1466 {
1467
1468 return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
1469 }
1470
1471 int
1472 vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
1473 {
1474
1475 return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
1476 }
1477
1478 int
1479 vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
1480 {
1481
1482 return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
1483 }
1484
1485 int
1486 vm_activate_cpu(struct vmctx *ctx, int vcpu)
1487 {
1488 struct vm_activate_cpu ac;
1489 int error;
1490
1491 bzero(&ac, sizeof(struct vm_activate_cpu));
1492 ac.vcpuid = vcpu;
1493 error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
1494 return (error);
1495 }
1496
1497 int
1498 vm_suspend_cpu(struct vmctx *ctx, int vcpu)
1499 {
1500 struct vm_activate_cpu ac;
1501 int error;
1502
1503 bzero(&ac, sizeof(struct vm_activate_cpu));
1504 ac.vcpuid = vcpu;
1505 error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
1506 return (error);
1507 }
1508
1509 int
1510 vm_resume_cpu(struct vmctx *ctx, int vcpu)
1511 {
1512 struct vm_activate_cpu ac;
1513 int error;
1514
1515 bzero(&ac, sizeof(struct vm_activate_cpu));
1516 ac.vcpuid = vcpu;
1517 error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
1518 return (error);
1519 }
1520
1521 int
1522 vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
1523 {
1524 struct vm_intinfo vmii;
1525 int error;
1526
1527 bzero(&vmii, sizeof(struct vm_intinfo));
1528 vmii.vcpuid = vcpu;
1529 error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
1530 if (error == 0) {
1531 *info1 = vmii.info1;
1532 *info2 = vmii.info2;
1533 }
1534 return (error);
1535 }
1536
1537 int
1538 vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
1539 {
1540 struct vm_intinfo vmii;
1541 int error;
1542
1543 bzero(&vmii, sizeof(struct vm_intinfo));
1544 vmii.vcpuid = vcpu;
1545 vmii.info1 = info1;
1546 error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
1547 return (error);
1548 }
1549
1550 int
1551 vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
1552 {
1553 struct vm_rtc_data rtcdata;
1554 int error;
1555
1556 bzero(&rtcdata, sizeof(struct vm_rtc_data));
1557 rtcdata.offset = offset;
1558 rtcdata.value = value;
1559 error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
1560 return (error);
1561 }
1562
1563 int
1564 vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
1565 {
1566 struct vm_rtc_data rtcdata;
1567 int error;
1568
1569 bzero(&rtcdata, sizeof(struct vm_rtc_data));
1570 rtcdata.offset = offset;
1571 error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
1572 if (error == 0)
1573 *retval = rtcdata.value;
1574 return (error);
1575 }
1576
1577 int
1578 vm_rtc_settime(struct vmctx *ctx, time_t secs)
1579 {
1580 struct vm_rtc_time rtctime;
1581 int error;
1582
1583 bzero(&rtctime, sizeof(struct vm_rtc_time));
1584 rtctime.secs = secs;
1585 error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
1586 return (error);
1587 }
1588
1589 int
1590 vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
1591 {
1592 struct vm_rtc_time rtctime;
1593 int error;
1594
1595 bzero(&rtctime, sizeof(struct vm_rtc_time));
1596 error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
1597 if (error == 0)
1598 *secs = rtctime.secs;
1599 return (error);
1600 }
1601
1602 int
1603 vm_restart_instruction(void *arg, int vcpu)
1604 {
1605 struct vmctx *ctx = arg;
1606
1607 return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
1608 }
1609
1610 int
1611 vm_snapshot_req(struct vm_snapshot_meta *meta)
1612 {
1613
1614 if (ioctl(meta->ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) {
1615 #ifdef SNAPSHOT_DEBUG
1616 fprintf(stderr, "%s: snapshot failed for %s: %d\r\n",
1617 __func__, meta->dev_name, errno);
1618 #endif
1619 return (-1);
1620 }
1621 return (0);
1622 }
1623
1624 int
1625 vm_restore_time(struct vmctx *ctx)
1626 {
1627 int dummy;
1628
1629 dummy = 0;
1630 return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy));
1631 }
1632
1633 int
1634 vm_set_topology(struct vmctx *ctx,
1635 uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
1636 {
1637 struct vm_cpu_topology topology;
1638
1639 bzero(&topology, sizeof (struct vm_cpu_topology));
1640 topology.sockets = sockets;
1641 topology.cores = cores;
1642 topology.threads = threads;
1643 topology.maxcpus = maxcpus;
1644 return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
1645 }
1646
1647 int
1648 vm_get_topology(struct vmctx *ctx,
1649 uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
1650 {
1651 struct vm_cpu_topology topology;
1652 int error;
1653
1654 bzero(&topology, sizeof (struct vm_cpu_topology));
1655 error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
1656 if (error == 0) {
1657 *sockets = topology.sockets;
1658 *cores = topology.cores;
1659 *threads = topology.threads;
1660 *maxcpus = topology.maxcpus;
1661 }
1662 return (error);
1663 }
1664
1665 int
1666 vm_get_device_fd(struct vmctx *ctx)
1667 {
1668
1669 return (ctx->fd);
1670 }
1671
1672 const cap_ioctl_t *
1673 vm_get_ioctls(size_t *len)
1674 {
1675 cap_ioctl_t *cmds;
1676 /* keep in sync with machine/vmm_dev.h */
1677 static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
1678 VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG,
1679 VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
1680 VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
1681 VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
1682 VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
1683 VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
1684 VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
1685 VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
1686 VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
1687 VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
1688 VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
1689 VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
1690 VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
1691 VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
1692 VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
1693 VM_GLA2GPA_NOFAULT,
1694 VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
1695 VM_SET_INTINFO, VM_GET_INTINFO,
1696 VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
1697 VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY };
1698
1699 if (len == NULL) {
1700 cmds = malloc(sizeof(vm_ioctl_cmds));
1701 if (cmds == NULL)
1702 return (NULL);
1703 bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
1704 return (cmds);
1705 }
1706
1707 *len = nitems(vm_ioctl_cmds);
1708 return (NULL);
1709 }
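/*
 * Example (illustrative): how a Capsicum-aware caller might combine the two
 * calling modes above to restrict the vmm device fd with cap_ioctls_limit(2):
 *
 *	const cap_ioctl_t *cmds;
 *	size_t ncmds;
 *
 *	vm_get_ioctls(&ncmds);
 *	if ((cmds = vm_get_ioctls(NULL)) == NULL)
 *		err(1, "vm_get_ioctls");
 *	if (cap_ioctls_limit(vm_get_device_fd(ctx), cmds, ncmds) == -1)
 *		err(1, "cap_ioctls_limit");
 */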
1710