#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the remaining bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * If we support unaligned MMIO, at most one fragment will be split into two:
 */
#ifdef KVM_UNALIGNED_MMIO
#  define KVM_EXTRA_MMIO_FRAGMENTS 1
#else
#  define KVM_EXTRA_MMIO_FRAGMENTS 0
#endif

#define KVM_USER_MMIO_SIZE 8

#define KVM_MAX_MMIO_FRAGMENTS \
	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask those bits to indicate an error.
 */
#define KVM_PFN_ERR_MASK	(0xfffULL << 52)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 3)

static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_ERR_BAD;
}

static inline bool is_invalid_pfn(pfn_t pfn)
{
	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
}
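
/*
 * Illustrative sketch (not part of this header): distinguishing the
 * error pfns above after a gfn_to_pfn() lookup.  kvm and gfn are
 * assumed to be in scope.
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return -ENOENT;		(no memslot backs this gfn)
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;		(fault, hwpoison or ro violation)
 *	... use pfn ...
 *	kvm_release_pfn_clean(pfn);
 */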

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17

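/*
 * A request is posted by setting one of these bits and kicking the
 * vcpu, e.g. (illustrative only):
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * The vcpu thread consumes the bit with kvm_check_request(), defined
 * near the end of this header.
 */
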
#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int                   dev_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

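/*
 * Illustrative dispatch sketch (not part of this header): an MMIO
 * write is offered to in-kernel devices on the bus first; a non-zero
 * return means no device claimed the range and the access must be
 * completed in userspace.
 *
 *	if (kvm_io_bus_write(kvm, KVM_MMIO_BUS, gpa, len, data))
 *		... no in-kernel device: exit to userspace ...
 */
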
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU relax intercept or pause loop exit optimization.
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or has its cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number must be chosen so that those limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
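
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, a slot of
 * 1025 pages rounds up to ALIGN(1025, 64) == 1088 bits, i.e. 136
 * bytes, so the dirty bitmap always ends on a long boundary.
 */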

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains a list of the irq
	 * chips the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

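/*
 * Illustrative use of the iterator above (not part of this header):
 * kick every online vcpu.  idx and vcpu are locals of the caller.
 *
 *	int idx;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(idx, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */
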
#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
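
/*
 * kvm_memslots() must run inside an SRCU read-side critical section
 * (or with slots_lock held), as the rcu_dereference_check() above
 * encodes.  A minimal read-side sketch (illustrative only):
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memslots *slots = kvm_memslots(kvm);
 *
 *	... e.g. id_to_memslot(slots, id) ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */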

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);

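/*
 * Illustrative sketch (not part of this header): round-tripping a
 * value through guest-physical memory with the accessors above.  gpa
 * is an assumed local; both calls return 0 on success.
 *
 *	u32 val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 *	val |= 0x1;
 *	if (kvm_write_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */
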
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	vtime_account(current);
	current->flags |= PF_VCPU;
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode.  In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view.  In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice).  Let's treat guest mode as a quiescent state, just
	 * like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	vtime_account(current);
	current->flags &= ~PF_VCPU;
}
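
/*
 * Rough shape of the call sequence around guest entry (an illustrative
 * sketch; the real arch-specific run loops are more involved):
 *
 *	preempt_disable();
 *	kvm_guest_enter();
 *	... run the guest until it exits ...
 *	kvm_guest_exit();
 *	preempt_enable();
 *
 * kvm_guest_enter() BUGs when called preemptibly, hence the explicit
 * preempt_disable() here.
 */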

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		      gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

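/*
 * Worked example for the conversions above (illustrative): with 4K
 * pages (PAGE_SHIFT == 12), gfn_to_gpa(0x1234) == 0x1234000 and
 * gpa_to_gfn(0x1234fff) == 0x1234, i.e. the low 12 offset bits are
 * dropped on the way back to a frame number.
 */
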
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so it can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
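
/*
 * Typical use of mmu_notifier_retry() (an illustrative sketch of the
 * pattern described above; details vary by architecture):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		(may sleep)
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	(range was invalidated: bail out)
 *	... install the translation ...
 *	spin_unlock(&kvm->mmu_lock);
 */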

#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	}
	return false;
}

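/*
 * The consumer side of the request bits defined earlier: the vcpu
 * thread drains its pending requests before each guest entry.  A
 * hypothetical drain loop (illustrative only; handlers vary by
 * architecture):
 *
 *	if (vcpu->requests) {
 *		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *			... flush the guest TLB ...
 *		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *			... refresh the guest clock ...
 *	}
 */
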
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif