#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by KVM;
 * the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/* Two fragments for MMIO accesses that cross a page boundary. */
#define KVM_MAX_MMIO_FRAGMENTS	2

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can use bits 52-62 to indicate an error pfn,
 * and bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but failed to be
 * translated to a pfn on the host.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated
 * to a pfn - either it is not in a slot, or the translation to
 * a pfn failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in a slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
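
/*
 * Usage sketch (illustrative, not part of the original header): a caller
 * translating a gfn typically distinguishes the failure classes with the
 * predicates above. gfn_to_pfn() is declared later in this file; the
 * emulate_mmio() handler named here is hypothetical.
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))		// no memslot backs this gfn
 *		return emulate_mmio(vcpu, gfn);
 *	if (is_error_pfn(pfn))		// in a slot, but translation failed
 *		return -EFAULT;
 *	// otherwise pfn is a plain host page frame number
 */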

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17
#define KVM_REQ_WATCHDOG          18
#define KVM_REQ_MASTERCLOCK_UPDATE 19
#define KVM_REQ_MCLOCK_INPROGRESS 20

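/*
 * Usage sketch (illustrative, not from the original source): another
 * thread raises a request bit and then kicks the vcpu so it notices
 * before re-entering the guest; kvm_make_request() and kvm_vcpu_kick()
 * are declared further down in this file.
 *
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *	kvm_vcpu_kick(vcpu);	// IPI the vcpu out of guest mode if needed
 */
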
#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int                   dev_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

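/*
 * Sketch (illustrative only): registering an emulated device on the MMIO
 * bus and dispatching a write to it. The address, length, and my_dev
 * variable are hypothetical.
 *
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
 *				      0xfed00000, 0x400, &my_dev->dev);
 *	...
 *	ret = kvm_io_bus_write(kvm, KVM_MMIO_BUS, gpa, len, data);
 *	// returns < 0 when no registered range covers the access
 */
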
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};
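
/*
 * Worked example (not from the original source): with 4 KiB pages, an
 * 8-byte MMIO write to gpa 0x10ffc straddles a page boundary and is
 * split into KVM_MAX_MMIO_FRAGMENTS == 2 fragments:
 *
 *	fragments[0] = { .gpa = 0x10ffc, .len = 4 };  // tail of first page
 *	fragments[1] = { .gpa = 0x11000, .len = 4 };  // head of second page
 *
 * Each fragment is then completed by its own KVM_EXIT_MMIO round trip
 * to userspace.
 */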

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU-relax intercept / pause-loop-exit optimization.
	 * in_spin_loop: set when a vcpu takes a pause-loop exit or its
	 *  cpu-relax instruction is intercepted.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this limit must be chosen so that those limits are never exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
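
/*
 * Worked example (illustrative): the dirty bitmap holds one bit per page,
 * rounded up to a multiple of BITS_PER_LONG. For a 1 GiB slot with 4 KiB
 * pages, npages == 262144, so the bitmap takes 262144 / 8 == 32768 bytes.
 */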

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains a list of the irq
	 * chips the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; please use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)
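
/*
 * Usage sketch (illustrative): requesting a TLB flush on every online
 * vcpu with the iterator above; the particular request is only an
 * example.
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm) {
 *		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *	}
 */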

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
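
/*
 * Sketch (illustrative, not from the original source): memslots are
 * protected by SRCU, so readers bracket lookups with
 * srcu_read_lock()/srcu_read_unlock() on kvm->srcu, matching the
 * rcu_dereference_check() in kvm_memslots() above:
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memory_slot *slot = id_to_memslot(kvm_memslots(kvm), id);
 *	// ... use slot ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */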

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);

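/*
 * Usage sketch (illustrative): the *_cached variants avoid redoing the
 * gfn->hva translation for a guest address that is accessed repeatedly.
 * The gpa and value below are hypothetical.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 1;
 *
 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 *	// equivalent to kvm_write_guest(kvm, gpa, &val, sizeof(val)),
 *	// but reusing the cached translation
 */
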
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	/*
	 * This is running in ioctl context so we can avoid
	 * the call to vtime_account() with its unnecessary idle check.
	 */
	vtime_account_system_irqsafe(current);
	current->flags |= PF_VCPU;
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from the rcu point of view.
	 * In addition a CPU may stay in guest mode for quite a long time (up
	 * to one time slice). Let's treat guest mode as a quiescent state,
	 * just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	/*
	 * This is running in ioctl context so we can avoid
	 * the call to vtime_account() with its unnecessary idle check.
	 */
	vtime_account_system_irqsafe(current);
	current->flags &= ~PF_VCPU;
}
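
/*
 * Sketch (illustrative, heavily simplified): how an arch run loop
 * typically brackets the world switch with these helpers; the
 * arch_run_guest() hook is hypothetical, and the exact ordering
 * varies by architecture.
 *
 *	local_irq_disable();
 *	vcpu->mode = IN_GUEST_MODE;
 *	kvm_guest_enter();		// preemption is off, so this is safe
 *	arch_run_guest(vcpu);		// world switch into the guest
 *	vcpu->mode = OUTSIDE_GUEST_MODE;
 *	local_irq_enable();
 *	kvm_guest_exit();
 */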

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		      gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
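
/*
 * Worked examples (illustrative), assuming PAGE_SHIFT == 12:
 *
 *	gfn_to_gpa(0x1234)    == 0x1234000
 *	gpa_to_gfn(0x1234fff) == 0x1234	  // the page offset is dropped
 *	pfn_to_hpa(0x5678)    == 0x5678000
 */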

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
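
/*
 * Usage sketch (illustrative): a typical page-fault path samples
 * mmu_notifier_seq before the (possibly sleeping) translation, then
 * re-checks under mmu_lock and retries if an invalidation ran meanwhile:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		// may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;		// pfn may be stale, retry
 *	// ... install the mapping ...
 */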

#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
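
/*
 * Usage sketch (illustrative): arch code drains vcpu->requests before
 * entering the guest, consuming each set bit exactly once; the handlers
 * named here are hypothetical.
 *
 *	if (vcpu->requests) {
 *		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *			flush_guest_tlb(vcpu);
 *		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 *			reload_guest_mmu(vcpu);
 *	}
 */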

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif