#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16..31 of kvm_memory_region::flags are used internally by KVM;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can
 * use bits 62..52 to indicate an error pfn and bit 63 to indicate a
 * noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but could not be
 * translated to a host pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a
 * pfn: either the gfn is not in any slot or the translation failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
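
/*
 * Illustrative sketch (not part of this header): a typical caller of
 * gfn_to_pfn() distinguishes the encodings above before touching the
 * page; emulate_mmio() and kvm_handle_bad_page() are hypothetical
 * stand-ins for the caller's error paths.
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn)) {
 *		if (is_noslot_pfn(pfn))
 *			return emulate_mmio(vcpu, gfn);  // no memslot backs gfn
 *		return kvm_handle_bad_page(vcpu, gfn, pfn);
 *	}
 *	// pfn now refers to a real host page frame
 */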

/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and their own kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			   int len, void *val, long cookie);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
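
/*
 * Illustrative sketch (not part of this header): MMIO emulation first
 * offers a guest access to the in-kernel devices; only if no device on
 * the bus claims the range (non-zero return) is the exit forwarded to
 * userspace as KVM_EXIT_MMIO.
 *
 *	if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, gpa, len, data))
 *		return 0;	// handled by an in-kernel device
 *	// otherwise fill in vcpu->run for a userspace MMIO exit
 */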

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU-relax-intercept / pause-loop-exit optimization.
	 * in_spin_loop: set when a vcpu takes a pause-loop exit or its
	 *  cpu-relax instruction is intercepted.
	 * dy_eligible: indicates whether the vcpu is eligible for
	 *  directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
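
/*
 * Illustrative sketch (not part of this header): kick paths use the
 * cmpxchg above so a rescheduling IPI is only sent while the vcpu is
 * actually executing guest code; compare kvm_vcpu_kick() and the arch
 * kvm_arch_vcpu_should_kick() implementations.
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	smp_mb();	// make the request visible before checking mode
 *	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
 *		smp_send_reschedule(vcpu->cpu);	// force a VM exit
 */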

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this limit must be chosen so as not to exceed them.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
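
/*
 * Worked example: a 1 GiB slot of 4 KiB pages has npages = 262144, so
 * its bitmap takes ALIGN(262144, BITS_PER_LONG) / 8 = 32768 bytes; a
 * single-page slot still rounds up to one long (8 bytes on 64-bit).
 */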

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains a list of the irq
	 * chips the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * Memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)
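
/*
 * Illustrative sketch (not part of this header): both iterators are
 * plain for-loop wrappers. E.g., kicking every online vcpu and summing
 * the pages of all populated memslots (kvm_memslots() callers must be
 * inside an srcu read-side section or hold slots_lock):
 *
 *	struct kvm_memory_slot *memslot;
 *	struct kvm_vcpu *vcpu;
 *	unsigned long npages = 0;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 *
 *	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
 *		npages += memslot->npages;
 */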

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
		     u64 last_generation);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);
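
/*
 * Illustrative sketch (not part of this header): steal-time style
 * updates initialize a gfn_to_hva_cache once and then reuse the cached
 * translation on every update, avoiding a memslot lookup per write.
 * struct my_shared_page is a hypothetical guest-shared structure.
 *
 *	struct gfn_to_hva_cache ghc;
 *	struct my_shared_page val;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		return -EFAULT;	// gpa not backed by a memslot
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */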

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	BUG_ON(preemptible());

	local_irq_save(flags);
	guest_enter();
	local_irq_restore(flags);

	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest
	 * mode is very similar to exiting to userspace from RCU's point of
	 * view. In addition, the CPU may stay in guest mode for quite a
	 * long time (up to one time slice). Let's treat guest mode as a
	 * quiescent state, just as we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit();
	local_irq_restore(flags);
}
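
/*
 * Illustrative sketch (not part of this header): arch vcpu-run loops
 * bracket the actual guest entry with these helpers, roughly (see e.g.
 * x86's vcpu_enter_guest() for the real thing):
 *
 *	preempt_disable();
 *	local_irq_disable();
 *	vcpu->mode = IN_GUEST_MODE;
 *	smp_mb();	// order the mode store against the requests check
 *	if (vcpu->requests || need_resched() || signal_pending(current)) {
 *		// bail out and retry instead of entering the guest
 *		vcpu->mode = OUTSIDE_GUEST_MODE;
 *		local_irq_enable();
 *		preempt_enable();
 *	} else {
 *		kvm_guest_enter();
 *		... enter the guest ...
 *		vcpu->mode = OUTSIDE_GUEST_MODE;
 *		smp_wmb();
 *		local_irq_enable();
 *		kvm_guest_exit();
 *		preempt_enable();
 *	}
 */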

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		      gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
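
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), gpa 0x12345678
 * yields gfn 0x12345 with page offset 0x678, and gfn_to_gpa(0x12345)
 * returns the page-aligned gpa 0x12345000. __gfn_to_hva_memslot() and
 * hva_to_gfn_memslot() apply the same shift relative to a slot's
 * base_gfn/userspace_addr, so they invert each other for addresses
 * inside the slot.
 */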

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * it can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
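
/*
 * Illustrative sketch (not part of this header): the canonical caller
 * is an arch page-fault handler, which snapshots mmu_notifier_seq
 * before translating the gfn and rechecks under mmu_lock before
 * installing the mapping (compare x86's tdp_page_fault()):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();	// pairs with the barriers described above
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	...
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;  // an invalidation raced with us; retry the fault
 *	... install pfn in the shadow/stage-2 page tables ...
 *	spin_unlock(&kvm->mmu_lock);
 */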

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
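
/*
 * Illustrative sketch (not part of this header): producers set a
 * request bit and usually kick the vcpu; the vcpu run loop consumes
 * the bit with kvm_check_request() before the next guest entry.
 * update_guest_clock() below is a hypothetical handler.
 *
 *	// producer, possibly on another CPU:
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, in the vcpu run loop:
 *	if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *		update_guest_clock(vcpu);
 */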

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};
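
/*
 * Illustrative sketch (not part of this header): a minimal backend
 * fills in the mandatory hooks; the "foo" names are hypothetical.
 * Note that destroy must free dev itself, per the comment above.
 *
 *	static int kvm_foo_create(struct kvm_device *dev, u32 type)
 *	{
 *		dev->private = kzalloc(sizeof(struct foo), GFP_KERNEL);
 *		return dev->private ? 0 : -ENOMEM;
 *	}
 *
 *	static void kvm_foo_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev->private);
 *		kfree(dev);
 *	}
 *
 *	struct kvm_device_ops kvm_foo_ops = {
 *		.name = "kvm-foo",
 *		.create = kvm_foo_create,
 *		.destroy = kvm_foo_destroy,
 *	};
 */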

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_vfio_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif /* __KVM_HOST_H */
1109