/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMM_H_
#define _VMM_H_

#include <sys/sdt.h>
#include <x86/segments.h>

struct vm_snapshot_meta;

#ifdef _KERNEL
SDT_PROVIDER_DECLARE(vmm);
#endif

enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_TRIPLEFAULT,
	VM_SUSPEND_LAST
};

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_GUEST_CR2,
	VM_REG_GUEST_PDPTE0,
	VM_REG_GUEST_PDPTE1,
	VM_REG_GUEST_PDPTE2,
	VM_REG_GUEST_PDPTE3,
	VM_REG_GUEST_INTR_SHADOW,
	VM_REG_GUEST_DR0,
	VM_REG_GUEST_DR1,
	VM_REG_GUEST_DR2,
	VM_REG_GUEST_DR3,
	VM_REG_GUEST_DR6,
	VM_REG_GUEST_ENTRY_INST_LENGTH,
	VM_REG_LAST
};

enum x2apic_state {
	X2APIC_DISABLED,
	X2APIC_ENABLED,
	X2APIC_STATE_LAST
};

#define VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define VM_INTINFO_DEL_ERRCODE	0x800
#define VM_INTINFO_RSVD		0x7ffff000
#define VM_INTINFO_VALID	0x80000000
#define VM_INTINFO_TYPE		0x700
#define VM_INTINFO_HWINTR	(0 << 8)
#define VM_INTINFO_NMI		(2 << 8)
#define VM_INTINFO_HWEXCEPTION	(3 << 8)
#define VM_INTINFO_SWINTR	(4 << 8)
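
/*
 * Example (illustrative only, not an additional interface): an intinfo
 * word describing a pending #GP hardware exception that delivers an error
 * code combines the vector with the type and validity bits, e.g.
 *
 *	intinfo = IDT_GP | VM_INTINFO_HWEXCEPTION |
 *	    VM_INTINFO_DEL_ERRCODE | VM_INTINFO_VALID;
 *
 * VM_INTINFO_VECTOR(intinfo) then recovers the vector again.
 */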

/*
 * The VM name has to fit into the pathname length constraints of devfs,
 * governed primarily by SPECNAMELEN. The length is the total number of
 * characters in the full path, relative to the mount point and not
 * including any leading '/' characters.
 * A prefix and a suffix are added to the name specified by the user.
 * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
 * longer for future use.
 * The suffix is a string that identifies a bootrom image or some similar
 * image that is attached to the VM. A separator character gets added to
 * the suffix automatically when generating the full path, so it must be
 * accounted for, reducing the effective length by 1.
 * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
 * bytes for FreeBSD 12. A minimum length is set for safety and supports
 * a SPECNAMELEN as small as 32 on old systems.
 */
#define VM_MAX_PREFIXLEN 10
#define VM_MAX_SUFFIXLEN 15
#define VM_MIN_NAMELEN	6
#define VM_MAX_NAMELEN \
	(SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
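
/*
 * Worked example (illustrative; assumes SPECNAMELEN is 255, as on
 * FreeBSD 13): VM_MAX_NAMELEN = 255 - 10 - 15 - 1 = 229, matching the
 * figure quoted above. With the older SPECNAMELEN of 63 the same formula
 * yields 37.
 */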

#ifdef _KERNEL
CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;
enum snapshot_req;

struct vm_eventinfo {
	void *rptr;	/* rendezvous cookie */
	int *sptr;	/* suspend cookie */
	int *iptr;	/* reqidle cookie */
};

typedef int (*vmm_init_func_t)(int ipinum);
typedef int (*vmm_cleanup_func_t)(void);
typedef void (*vmm_resume_func_t)(void);
typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
    struct pmap *pmap, struct vm_eventinfo *info);
typedef void (*vmi_cleanup_func_t)(void *vmi);
typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
    uint64_t *retval);
typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
    uint64_t val);
typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef int (*vmi_snapshot_t)(void *vmi, struct vm_snapshot_meta *meta);
typedef int (*vmi_snapshot_vmcx_t)(void *vmi, struct vm_snapshot_meta *meta,
    int vcpu);
typedef int (*vmi_restore_tsc_t)(void *vmi, int vcpuid, uint64_t now);

struct vmm_ops {
	vmm_init_func_t modinit;	/* module wide initialization */
	vmm_cleanup_func_t modcleanup;
	vmm_resume_func_t modresume;

	vmi_init_func_t init;		/* vm-specific initialization */
	vmi_run_func_t run;
	vmi_cleanup_func_t cleanup;
	vmi_get_register_t getreg;
	vmi_set_register_t setreg;
	vmi_get_desc_t getdesc;
	vmi_set_desc_t setdesc;
	vmi_get_cap_t getcap;
	vmi_set_cap_t setcap;
	vmi_vmspace_alloc vmspace_alloc;
	vmi_vmspace_free vmspace_free;
	vmi_vlapic_init vlapic_init;
	vmi_vlapic_cleanup vlapic_cleanup;

	/* checkpoint operations */
	vmi_snapshot_t snapshot;
	vmi_snapshot_vmcx_t vmcx_snapshot;
	vmi_restore_tsc_t restore_tsc;
};

extern const struct vmm_ops vmm_ops_intel;
extern const struct vmm_ops vmm_ops_amd;
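
/*
 * Sketch of how a backend wires up its entry points (illustrative only;
 * vmx_modinit, vmx_init and vmx_run are stand-ins for whatever functions
 * the backend actually provides):
 *
 *	const struct vmm_ops vmm_ops_intel = {
 *		.modinit = vmx_modinit,
 *		.init    = vmx_init,
 *		.run     = vmx_run,
 *		...
 *	};
 *
 * The generic vmm layer picks vmm_ops_intel or vmm_ops_amd at module load
 * time based on the host CPU.
 */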

int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
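
/*
 * Illustrative sketch (error handling elided; the segment id of 0 and the
 * size are assumptions for the example): wiring 1GB of system memory at
 * guest physical address 0 is a two-step operation -- allocate the
 * segment, then map it into the guest address space:
 *
 *	error = vm_alloc_memseg(vm, 0, 1024 * 1024 * 1024, true);
 *	error = vm_mmap_memseg(vm, 0, 0, 0, 1024 * 1024 * 1024,
 *	    VM_PROT_ALL, 0);
 *
 * Both calls must be made with all vcpus frozen, as noted above.
 */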

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
void *vm_gpa_hold(struct vm *, int vcpuid, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
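
/*
 * Typical usage sketch for vm_gpa_hold()/vm_gpa_release() (illustrative;
 * 'cookie' and 'vaddr' are local names assumed for the example):
 *
 *	void *cookie, *vaddr;
 *
 *	vaddr = vm_gpa_hold(vm, vcpuid, gpa, PAGE_SIZE, VM_PROT_READ,
 *	    &cookie);
 *	if (vaddr != NULL) {
 *		... access the guest page through 'vaddr' ...
 *		vm_gpa_release(cookie);
 *	}
 */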

int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta);
int vm_restore_time(struct vm *vm);

#ifdef _SYS__CPUSET_H_
/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * If the rendezvous is being initiated from a vcpu context then the
 * 'vcpuid' must refer to that vcpu, otherwise it should be set to -1.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled. The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
typedef void (*vm_rendezvous_func_t)(struct vm *vm, int vcpuid, void *arg);
int vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
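
/*
 * Usage sketch (illustrative only; 'flush_func' is a hypothetical callback
 * supplied by the caller): rendezvous all active vcpus from a non-vcpu
 * context and run the callback on each of them:
 *
 *	static void
 *	flush_func(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		... must not sleep ...
 *	}
 *
 *	error = vm_smp_rendezvous(vm, -1, vm_active_cpus(vm),
 *	    flush_func, NULL);
 *
 * The vcpuid of -1 indicates that the rendezvous is not being initiated
 * from a vcpu context, as described above.
 */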
#endif /* _SYS__CPUSET_H_ */

static __inline int
vcpu_rendezvous_pending(struct vm_eventinfo *info)
{

	return (*((uintptr_t *)(info->rptr)) != 0);
}

static __inline int
vcpu_suspended(struct vm_eventinfo *info)
{

	return (*info->sptr);
}

static __inline int
vcpu_reqidle(struct vm_eventinfo *info)
{

	return (*info->iptr);
}

int vcpu_debugged(struct vm *vm, int vcpuid);

/*
 * Return true if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);

static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vm *vm, int vcpu)
{

	if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
		return (1);
	else if (curthread->td_owepreempt)
		return (1);
	else
		return (0);
}
#endif

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
    uint32_t errcode, int restart_instruction);
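
/*
 * Illustrative comparison (not an additional interface): injecting a #GP
 * with a zero error code directly versus via the preferred wrapper:
 *
 *	vm_inject_exception(vm, vcpuid, IDT_GP, 1, 0, 1);
 *	vm_inject_gp(vm, vcpuid);
 *
 * The final argument of the direct call requests that the faulting
 * instruction be restarted; the wrapper takes care of that fault-like
 * detail itself.
 */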

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For
 * example, if the task switch emulation is triggered via a task gate then
 * it should call this function with 'intinfo=0' to indicate that the
 * external event is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);

/*
 * Function used to keep track of the guest's TSC offset. The
 * offset is used by the virtualization extensions to provide a consistent
 * value for the Time Stamp Counter to the guest.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_set_tsc_offset(struct vm *vm, int vcpu_id, uint64_t offset);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t gpa;
	size_t len;
	void *hva;
	void *cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from the guest linear address space
 * starting at 'gla' and extending for 'len' bytes. The 'prot' should be
 * set to PROT_READ for a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by calling
 * 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
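
/*
 * Usage sketch for a guest-to-host copy (illustrative only; 'copyinfo',
 * 'fault' and 'buf' are local names assumed for the example):
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *	char buf[128];
 *
 *	error = vm_copy_setup(vm, vcpuid, &paging, gla, sizeof(buf),
 *	    PROT_READ, copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && !fault) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, sizeof(buf));
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */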

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
#endif /* _KERNEL */

#define VM_MAXCPU 16 /* maximum virtual cpus */

/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_ENABLE_INVPCID,
	VM_CAP_BPT_EXIT,
	VM_CAP_RDPID,
	VM_CAP_RDTSCP,
	VM_CAP_MAX
};

enum vm_intr_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t base;
	uint32_t limit;
	uint32_t access;
};
#define SEG_DESC_TYPE(access) ((access) & 0x001f)
#define SEG_DESC_DPL(access) (((access) >> 5) & 0x3)
#define SEG_DESC_PRESENT(access) (((access) & 0x0080) ? 1 : 0)
#define SEG_DESC_DEF32(access) (((access) & 0x4000) ? 1 : 0)
#define SEG_DESC_GRANULARITY(access) (((access) & 0x8000) ? 1 : 0)
#define SEG_DESC_UNUSABLE(access) (((access) & 0x10000) ? 1 : 0)
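
/*
 * Example (illustrative only): a flat 32-bit code segment is typically
 * described by access = 0xc09b, for which the accessors above yield
 * SEG_DESC_TYPE() == 0x1b, SEG_DESC_DPL() == 0, SEG_DESC_PRESENT() == 1,
 * SEG_DESC_DEF32() == 1 and SEG_DESC_GRANULARITY() == 1.
 */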

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,	/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,		/* IA-32E mode (CS.L = 1) */
};

enum vm_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
	PAGING_MODE_64_LA57,
};

struct vm_guest_paging {
	uint64_t cr3;
	int cpl;
	enum vm_cpu_mode cpu_mode;
	enum vm_paging_mode paging_mode;
};

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. Their contents are exposed only
 * because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t op_byte;	/* actual opcode byte */
	uint8_t op_type;	/* type of operation (e.g. MOV) */
	uint16_t op_flags;
};
_Static_assert(sizeof(struct vie_op) == 4, "ABI");
_Static_assert(_Alignof(struct vie_op) == 2, "ABI");

#define VIE_INST_SIZE 15
struct vie {
	uint8_t inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t num_valid;		/* size of the instruction */

	/* The following fields are all zeroed upon restart. */
#define vie_startzero num_processed
	uint8_t num_processed;

	uint8_t addrsize:4, opsize:4;	/* address and operand sizes */
	uint8_t rex_w:1,		/* REX prefix */
		rex_r:1,
		rex_x:1,
		rex_b:1,
		rex_present:1,
		repz_present:1,		/* REP/REPE/REPZ prefix */
		repnz_present:1,	/* REPNE/REPNZ prefix */
		opsize_override:1,	/* Operand size override */
		addrsize_override:1,	/* Address size override */
		segment_override:1;	/* Segment override */

	uint8_t mod:2,			/* ModRM byte */
		reg:4,
		rm:4;

	uint8_t ss:2,			/* SIB byte */
		vex_present:1,		/* VEX prefixed */
		vex_l:1,		/* L bit */
		index:4,		/* SIB byte */
		base:4;			/* SIB byte */

	uint8_t disp_bytes;
	uint8_t imm_bytes;

	uint8_t scale;

	uint8_t vex_reg:4,	/* vvvv: first source register specifier */
		vex_pp:2,	/* pp */
		_sparebits:2;

	uint8_t _sparebytes[2];

	int base_register;	/* VM_REG_GUEST_xyz */
	int index_register;	/* VM_REG_GUEST_xyz */
	int segment_register;	/* VM_REG_GUEST_xyz */

	int64_t displacement;	/* optional addr displacement */
	int64_t immediate;	/* optional immediate operand */

	uint8_t decoded;	/* set to 1 if successfully decoded */

	uint8_t _sparebyte;

	struct vie_op op;	/* opcode description */
};
_Static_assert(sizeof(struct vie) == 64, "ABI");
_Static_assert(__offsetof(struct vie, disp_bytes) == 22, "ABI");
_Static_assert(__offsetof(struct vie, scale) == 24, "ABI");
_Static_assert(__offsetof(struct vie, base_register) == 28, "ABI");

enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
	VM_EXITCODE_RENDEZVOUS,
	VM_EXITCODE_IOAPIC_EOI,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_INOUT_STR,
	VM_EXITCODE_TASK_SWITCH,
	VM_EXITCODE_MONITOR,
	VM_EXITCODE_MWAIT,
	VM_EXITCODE_SVM,
	VM_EXITCODE_REQIDLE,
	VM_EXITCODE_DEBUG,
	VM_EXITCODE_VMINSN,
	VM_EXITCODE_BPT,
	VM_EXITCODE_MAX
};

struct vm_inout {
	uint16_t bytes:3;	/* 1 or 2 or 4 */
	uint16_t in:1;
	uint16_t string:1;
	uint16_t rep:1;
	uint16_t port;
	uint32_t eax;		/* valid for out */
};

struct vm_inout_str {
	struct vm_inout inout;	/* must be the first element */
	struct vm_guest_paging paging;
	uint64_t rflags;
	uint64_t cr0;
	uint64_t index;
	uint64_t count;		/* rep=1 (%rcx), rep=0 (1) */
	int addrsize;
	enum vm_reg_name seg_name;
	struct seg_desc seg_desc;
};

enum task_switch_reason {
	TSR_CALL,
	TSR_IRET,
	TSR_JMP,
	TSR_IDT_GATE,	/* task gate in IDT */
};

struct vm_task_switch {
	uint16_t tsssel;	/* new TSS selector */
	int ext;		/* task switch due to external event */
	uint32_t errcode;
	int errcode_valid;	/* push 'errcode' on the new stack */
	enum task_switch_reason reason;
	struct vm_guest_paging paging;
};

struct vm_exit {
	enum vm_exitcode exitcode;
	int inst_length;	/* 0 means unknown */
	uint64_t rip;
	union {
		struct vm_inout inout;
		struct vm_inout_str inout_str;
		struct {
			uint64_t gpa;
			int fault_type;
		} paging;
		struct {
			uint64_t gpa;
			uint64_t gla;
			uint64_t cs_base;
			int cs_d;	/* CS.D */
			struct vm_guest_paging paging;
			struct vie vie;
		} inst_emul;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int status;	/* vmx inst status */
			/*
			 * 'exit_reason' and 'exit_qualification' are valid
			 * only if 'status' is zero.
			 */
			uint32_t exit_reason;
			uint64_t exit_qualification;
			/*
			 * 'inst_error' and 'inst_type' are valid
			 * only if 'status' is non-zero.
			 */
			int inst_type;
			int inst_error;
		} vmx;
		/*
		 * SVM specific payload.
		 */
		struct {
			uint64_t exitcode;
			uint64_t exitinfo1;
			uint64_t exitinfo2;
		} svm;
		struct {
			int inst_length;
		} bpt;
		struct {
			uint32_t code;	/* ecx value */
			uint64_t wval;
		} msr;
		struct {
			int vcpu;
			uint64_t rip;
		} spinup_ap;
		struct {
			uint64_t rflags;
			uint64_t intr_status;
		} hlt;
		struct {
			int vector;
		} ioapic_eoi;
		struct {
			enum vm_suspend_how how;
		} suspended;
		struct vm_task_switch task_switch;
	} u;
};
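
/*
 * Illustrative sketch (not prescriptive) of how a consumer inspects a
 * vm_exit; the 'vmexit' pointer is an assumption for the example:
 *
 *	switch (vmexit->exitcode) {
 *	case VM_EXITCODE_INOUT:
 *		... emulate the port access described by vmexit->u.inout ...
 *		break;
 *	case VM_EXITCODE_SUSPENDED:
 *		... act on vmexit->u.suspended.how ...
 *		break;
 *	default:
 *		break;
 *	}
 */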

/* APIs to inject faults into the guest */
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
    int errcode);

static __inline void
vm_inject_ud(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
}

static __inline void
vm_inject_gp(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
}

static __inline void
vm_inject_ac(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
}

static __inline void
vm_inject_ss(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
}

void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);

int vm_restart_instruction(void *vm, int vcpuid);

#endif /* _VMM_H_ */