xref: /linux-6.15/include/linux/kvm_host.h (revision 4f542e3d)
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12

#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then a linear search will suffice.  At least it's abstracted so
 * we can change it in one place.
 */
struct kvm_io_bus {
	int                   dev_count;
#define NR_IOBUS_DEVS 200
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
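
/*
 * Illustrative sketch (not part of the original header): attaching an
 * in-kernel device to the MMIO bus.  "my_dev" and its embedded
 * struct kvm_io_device field are hypothetical names; registration must
 * be done under kvm->slots_lock.
 *
 *	static int my_dev_attach(struct kvm *kvm, struct my_dev *dev)
 *	{
 *		int ret;
 *
 *		mutex_lock(&kvm->slots_lock);
 *		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->iodev);
 *		mutex_unlock(&kvm->slots_lock);
 *		return ret;	// fails once the NR_IOBUS_DEVS array is full
 *	}
 *
 * A vcpu-exit MMIO write is then dispatched with:
 *
 *	kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, gpa, len, data);
 */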

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
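
/*
 * Illustrative flow (simplified, x86-style; an assumption, not code from
 * this file): when a guest access faults on a page the host has paged
 * out, the arch fault handler may queue the fault asynchronously rather
 * than block the vcpu, and the vcpu loop polls
 * kvm_check_async_pf_completion() to retire entries on async_pf.done.
 *
 *	if (!kvm_setup_async_pf(vcpu, gva, gfn, &arch_data))
 *		handle_fault_synchronously();	// hypothetical fallback:
 *						// queue full or setup failed
 */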

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	struct mutex mutex;
	int   cpu;
	atomic_t guest_mode;
	struct kvm_run *run;
	unsigned long requests;
	unsigned long guest_debug;
	int srcu_idx;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

	struct kvm_vcpu_arch arch;
};

/*
 * Some of the bitops functions do not support arbitrarily long bitmaps;
 * this limit must be chosen so that it stays within those constraints.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
	unsigned long rmap_pde;
	int write_count;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_head;
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
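
/*
 * Worked example: a 100-page slot needs ALIGN(100, 64) / 8 = 128 / 8 = 16
 * bytes of dirty bitmap on a 64-bit host; npages is rounded up to a whole
 * number of unsigned longs so the bitops helpers never touch memory past
 * the allocation.
 */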

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains the list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

struct kvm_memslots {
	int nmemslots;
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
};

struct kvm {
	spinlock_t mmu_lock;
	raw_spinlock_t requests_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
	struct kvm_vcpu *bsp_vcpu;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
	     idx < atomic_read(&kvm->online_vcpus) && vcpup; \
	     vcpup = kvm_get_vcpu(kvm, ++idx))
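
/*
 * Illustrative use, following the pattern of callers elsewhere in KVM:
 * kick every online vcpu.
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */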

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
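
/*
 * Illustrative read-side pattern (implied by the lockdep expression
 * above): dereference memslots inside an SRCU read-side critical section
 * unless slots_lock is already held.
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memslots *slots = kvm_memslots(kvm);
 *	... use slots->memslots[] ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */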

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
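
/*
 * Illustrative sketch: registering 16MB of page-aligned userspace memory
 * as guest RAM in slot 0.  The struct is the same layout userspace passes
 * to the KVM_SET_USER_MEMORY_REGION ioctl; kvm_set_memory_region() takes
 * slots_lock and calls __kvm_set_memory_region().
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.flags = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 16 << 20,	// bytes
 *		.userspace_addr = (__u64)uaddr,	// page-aligned HVA
 *	};
 *
 *	r = kvm_set_memory_region(kvm, &mem, 1);	// user_alloc = 1
 */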

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn);
int memslot_id(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);
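
/*
 * Illustrative reference discipline: a page returned by gfn_to_page() is
 * pinned and must be released; use the _dirty variant if the page may
 * have been written.  Callers in this era release even the error page,
 * which carries its own reference.
 *
 *	struct page *page = gfn_to_page(kvm, gfn);
 *	if (is_error_page(page)) {
 *		kvm_release_page_clean(page);
 *		return -EFAULT;
 *	}
 *	... kmap(), modify, kunmap() ...
 *	kvm_release_page_dirty(page);
 */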

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);
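
/*
 * Illustrative use: copying a small structure to and from guest physical
 * memory.  Both helpers return 0 on success and handle accesses that
 * cross memslot boundaries; "struct my_shared" is a hypothetical name.
 *
 *	struct my_shared s;
 *
 *	if (kvm_read_guest(kvm, gpa, &s, sizeof(s)))
 *		return -EFAULT;
 *	s.flags |= 1;
 *	if (kvm_write_guest(kvm, gpa, &s, sizeof(s)))
 *		return -EFAULT;
 */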

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	char irq_name[32];
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
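
/*
 * Illustrative use (simplified from the arch vcpu-run loops; an
 * approximation, not code from this file): the accounting brackets the
 * low-level world switch, with interrupts disabled across guest entry.
 *
 *	local_irq_disable();
 *	kvm_guest_enter();
 *	... arch-specific guest entry/exit ...
 *	local_irq_enable();
 *	kvm_guest_exit();
 */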

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
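
/*
 * Worked example with 4K pages (PAGE_SHIFT = 12): gpa_to_gfn(0x12345) =
 * 0x12, and gfn_to_gpa(0x12) = 0x12000 recovers only the page-aligned
 * base, so the caller must re-add the sub-page offset (0x345).
 * gfn_to_hva_memslot() applies the same arithmetic on the host side:
 * hva = userspace_addr + (gfn - base_gfn) * PAGE_SIZE.
 */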

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under mmu_lock and both values are modified
	 * under mmu_lock, so there is no need for an smp_rmb() between
	 * them; otherwise mmu_notifier_count would have to be read before
	 * mmu_notifier_seq, see the write side in
	 * mmu_notifier_invalidate_range_end().
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
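
/*
 * Illustrative fault-path pattern (mirroring the arch page-fault
 * handlers): sample the sequence count, do the sleepable pfn lookup
 * outside mmu_lock, then recheck under mmu_lock before installing the
 * translation.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	// an invalidation raced with us; retry
 *	... install the mapping ...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */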

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);
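
/*
 * Illustrative sketch (values made up): one routing entry connecting
 * GSI 5 to pin 5 of irqchip 0, in the same layout userspace hands to
 * the KVM_SET_GSI_ROUTING ioctl.
 *
 *	struct kvm_irq_routing_entry e = {
 *		.gsi = 5,
 *		.type = KVM_IRQ_ROUTING_IRQCHIP,
 *		.u.irqchip = { .irqchip = 0, .pin = 5 },
 *	};
 *
 *	r = kvm_set_irq_routing(kvm, &e, 1, 0);
 */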

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
{
	return test_and_set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
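
/*
 * Illustrative request lifecycle (the pattern used by the arch vcpu-run
 * loops): a producer sets a request bit and kicks the vcpu; the vcpu
 * thread consumes the bit before re-entering the guest.
 *
 *	// producer, any context:
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, vcpu thread, before guest entry:
 *	if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *		... refresh the guest clock ...
 */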

#endif