/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Anish Gupta ([email protected])
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SVM_SOFTC_H_
#define _SVM_SOFTC_H_

#define	SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
#define	SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)

struct asid {
	uint64_t	gen;	/* range is [1, ~0UL] */
	uint32_t	num;	/* range is [1, nasid - 1] */
};
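
/*
 * Both ranges start at 1 because 0 is reserved in each case: ASID 0 is the
 * host's ASID on AMD-V hardware, and generation 0 is presumably kept as an
 * "invalid" marker that forces a fresh ASID allocation the next time the
 * vcpu runs.  See svm.c for the actual allocation policy.
 */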

/*
 * XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
 * due to VMCB alignment requirements.
 */
struct svm_vcpu {
	struct vmcb	vmcb;	 /* hardware saved vcpu context */
	struct svm_regctx swctx; /* software saved vcpu context */
	uint64_t	vmcb_pa; /* VMCB physical address */
	uint64_t	nextrip; /* next instruction to be executed by guest */
	int		lastcpu; /* host cpu that the vcpu last ran on */
	uint32_t	dirty;	 /* state cache bits that must be cleared */
	long		eptgen;	 /* pmap->pm_eptgen when the vcpu last ran */
	struct asid	asid;
} __aligned(PAGE_SIZE);
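
/*
 * One possible shape for the split suggested above (a sketch only, not what
 * this header currently defines): allocate the VMCB on its own page-aligned
 * page and keep just a pointer here, so 'struct svm_vcpu' itself no longer
 * needs PAGE_SIZE alignment:
 *
 *	struct svm_vcpu {
 *		struct vmcb	*vmcb;		(separately allocated, page-aligned)
 *		uint64_t	vmcb_pa;	(physical address of *vmcb)
 *		struct svm_regctx swctx;
 *		uint64_t	nextrip;
 *		int		lastcpu;
 *		uint32_t	dirty;
 *		long		eptgen;
 *		struct asid	asid;
 *	};
 */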

/*
 * SVM softc, one per virtual machine.
 */
struct svm_softc {
	uint8_t		apic_page[VM_MAXCPU][PAGE_SIZE];
	struct svm_vcpu	vcpu[VM_MAXCPU];
	vm_offset_t	nptp;		/* nested page table */
	uint8_t		*iopm_bitmap;	/* shared by all vcpus */
	uint8_t		*msr_bitmap;	/* shared by all vcpus */
	struct vm	*vm;
};

CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);

static __inline struct svm_vcpu *
svm_get_vcpu(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu]));
}

static __inline struct vmcb *
svm_get_vmcb(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb));
}

static __inline struct vmcb_state *
svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb.state));
}

static __inline struct vmcb_ctrl *
svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb.ctrl));
}

static __inline struct svm_regctx *
svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].swctx));
}

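/*
 * Mark VMCB state-cache categories as modified so they are removed from the
 * VMCB clean bits before the vcpu's next VMRUN (see the 'dirty' field above).
 * Callers typically pass one of the VMCB_CACHE_* constants from vmcb.h, e.g.
 * (illustrative):
 *
 *	svm_set_dirty(sc, vcpu, VMCB_CACHE_ASID);
 */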
static __inline void
svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
	struct svm_vcpu *vcpustate;

	vcpustate = svm_get_vcpu(sc, vcpu);

	vcpustate->dirty |= dirtybits;
}

#endif /* _SVM_SOFTC_H_ */