xref: /linux-6.15/include/linux/kvm_host.h (revision c819e2cf)
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16..31 of kvm_memory_region::flags are used internally by KVM;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for MMIO accesses that cross a page boundary. */
#define KVM_MAX_MMIO_FRAGMENTS	2

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can set bits 52..62 to indicate an error pfn and
 * bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host
 * failed to translate it to a pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated
 * to a pfn - either it is not in any slot or the translation
 * to a pfn failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
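
/*
 * Illustrative sketch (not part of this header): acting on the result of
 * gfn_to_pfn() with the predicates above. emulate_mmio() is a
 * hypothetical helper standing in for MMIO handling.
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return emulate_mmio(vcpu, gfn);
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;
 */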

/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif
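
/*
 * Illustrative sketch: a gfn->hva translation must be checked with
 * kvm_is_error_hva() before the address is passed to the uaccess helpers:
 *
 *	unsigned long hva = gfn_to_hva(kvm, gfn);
 *
 *	if (kvm_is_error_hva(hva))
 *		return -EFAULT;
 *	if (copy_to_user((void __user *)hva, data, len))
 *		return -EFAULT;
 */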

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS        23
#define KVM_REQ_DISABLE_IBS       24
#define KVM_REQ_APIC_PAGE_RELOAD  25

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
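
/*
 * Illustrative sketch: registering a device on the MMIO bus, assuming a
 * hypothetical driver-private structure that embeds a struct
 * kvm_io_device ("mydev->dev") whose ops were set up elsewhere.
 * Registration and unregistration run under kvm->slots_lock:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len,
 *				      &mydev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 */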

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

/*
 * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
 * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
 * controls whether we retry the gup one more time to completion in that case.
 * This is typically called after a FAULT_FLAG_RETRY_NOWAIT in the main TDP
 * handler.
 */
int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
			 unsigned long addr, bool write_fault,
			 struct page **pagep);

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page MMIO access needs to be broken up into
 * separate exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU-relax intercept / pause-loop-exit optimization.
	 * in_spin_loop: set when a vcpu takes a pause-loop exit or a
	 *  CPU-relax intercept.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number must be chosen so that such limits are never exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
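
/*
 * Example: a 1 GiB slot has npages = 262144, so with one bit per page its
 * dirty bitmap occupies ALIGN(262144, BITS_PER_LONG) / 8 = 32768 bytes.
 */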

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
	};
	struct hlist_node link;
};

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)
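
/*
 * Illustrative sketch: iterating over all created vcpus with the macro
 * above, e.g. to kick each one out of guest mode:
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */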

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
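
/*
 * Illustrative sketch: memslots must be dereferenced under SRCU (or with
 * slots_lock held), which is what the rcu_dereference_check() in
 * kvm_memslots() verifies:
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memory_slot *slot = id_to_memslot(kvm_memslots(kvm), id);
 *
 *	... use slot ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */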

/*
 * The KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};
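
/*
 * Illustrative, simplified sketch of how the change type falls out of the
 * old and new slot contents in __kvm_set_memory_region():
 *
 *	if (!old.npages)
 *		change = KVM_MR_CREATE;
 *	else if (!new.npages)
 *		change = KVM_MR_DELETE;
 *	else if (new.base_gfn != old.base_gfn)
 *		change = KVM_MR_MOVE;
 *	else
 *		change = KVM_MR_FLAGS_ONLY;
 */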

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
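
/*
 * Illustrative sketch: for guest memory that is accessed repeatedly, the
 * gfn_to_hva_cache helpers avoid a memslot lookup on every access
 * (assuming caller-owned "ghc", "gpa" and "val"):
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		return -EFAULT;
 *	if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *		return -EFAULT;
 */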

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	BUG_ON(preemptible());

	local_irq_save(flags);
	guest_enter();
	local_irq_restore(flags);

	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into guest mode. In fact switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition a CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit();
	local_irq_restore(flags);
}
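
/*
 * Illustrative, simplified sketch of where the two helpers above sit in
 * an arch vcpu run loop (the real loops also check vcpu->requests and
 * pending signals before entry):
 *
 *	preempt_disable();
 *	vcpu->mode = IN_GUEST_MODE;
 *	kvm_guest_enter();
 *	... enter the guest ...
 *	kvm_guest_exit();
 *	vcpu->mode = OUTSIDE_GUEST_MODE;
 *	preempt_enable();
 */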

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
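
/*
 * Illustrative sketch: combining the two inline helpers above to go from
 * gfn to host virtual address (roughly what gfn_to_hva() does internally,
 * minus the slot validity checks):
 *
 *	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 *
 *	if (!slot)
 *		return KVM_HVA_ERR_BAD;
 *	return __gfn_to_hva_memslot(slot, gfn);
 */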

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so it can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
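
/*
 * Illustrative sketch of the canonical mmu_notifier_retry() pattern used
 * by page fault handlers (see e.g. arch/x86/kvm/mmu.c): sample the
 * sequence count before translating, then recheck under mmu_lock before
 * installing the mapping.
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;
 *	... install the mapping ...
 *	spin_unlock(&kvm->mmu_lock);
 */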

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 /* FIXME: we can have more than that... */
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
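
/*
 * Illustrative sketch: requests are raised on one side and consumed in
 * the vcpu run loop on the other; handle_clock_update() is a hypothetical
 * consumer.
 *
 *	(requester)
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	(vcpu run loop)
 *	if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *		handle_clock_update(vcpu);
 */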

extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
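
/*
 * Illustrative sketch: a device backend fills in the mandatory hooks and
 * registers under a type id. The ops name and KVM_DEV_TYPE_MY_DEV are
 * hypothetical; real type ids live in include/uapi/linux/kvm.h.
 *
 *	static struct kvm_device_ops my_dev_ops = {
 *		.name = "my-dev",
 *		.create = my_dev_create,
 *		.destroy = my_dev_destroy,
 *	};
 *
 *	ret = kvm_register_device_ops(&my_dev_ops, KVM_DEV_TYPE_MY_DEV);
 */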

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif