/* xref: /linux-6.15/include/linux/kvm_host.h (revision e9e8bcb8) */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
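
/*
 * Illustrative example (editor's sketch, not from the original header):
 * a request bit is raised with kvm_make_request() (defined at the end of
 * this header) and the target vcpu is then kicked so it notices it soon;
 * KVM_REQ_EVENT here is just an x86-flavoured example:
 *
 *	kvm_make_request(KVM_REQ_EVENT, vcpu);
 *	kvm_vcpu_kick(vcpu);
 */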

#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int                   dev_count;
#define NR_IOBUS_DEVS 200
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
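
/*
 * Illustrative example (editor's sketch, not from the original header):
 * a device emulation registers itself on one of the buses and is then
 * dispatched to by kvm_io_bus_read()/kvm_io_bus_write().  The caller is
 * assumed to hold kvm->slots_lock across (un)registration; "dev" and its
 * "io_dev" member below are hypothetical:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->io_dev);
 *	mutex_unlock(&kvm->slots_lock);
 */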

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
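
/*
 * Illustrative flow (editor's sketch, not from the original header): when a
 * guest fault cannot be resolved without sleeping, arch code may queue it
 * with kvm_setup_async_pf() and let a worker fault the page in from the
 * saved mm in the background; completions are then reaped before the next
 * guest entry.  "arch_pf" below is a hypothetical local:
 *
 *	if (kvm_setup_async_pf(vcpu, gva, gfn, &arch_pf))
 *		return;				(handled asynchronously)
 *	...
 *	kvm_check_async_pf_completion(vcpu);	(on the vcpu run loop)
 */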

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	int mmio_index;
	unsigned char mmio_data[KVM_MMIO_SIZE];
	gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
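
/*
 * Illustrative use (editor's sketch, not from the original header): the
 * cmpxchg() returns the old mode, so a kicker can both flag the exit and
 * decide whether an IPI is actually needed:
 *
 *	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
 *		smp_send_reschedule(vcpu->cpu);	(vcpu really was in the guest)
 */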

/*
 * Some of the bitops functions do not support arbitrarily long bitmaps;
 * this number must be chosen so that it does not exceed those limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
	unsigned long rmap_pde;
	int write_count;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_head;
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
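
/*
 * Worked example (editor's addition): with BITS_PER_LONG == 64, a slot of
 * npages == 100 rounds up to ALIGN(100, 64) == 128 bits, i.e. 16 bytes of
 * dirty bitmap, so the bitmap is always a whole number of longs.
 */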

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains a list of the irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

struct kvm_memslots {
	int nmemslots;
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
	struct kvm_vcpu *bsp_vcpu;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
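
/*
 * Illustrative use (editor's sketch, not from the original header):
 *
 *	int i;
 *	struct kvm_vcpu *v;
 *
 *	kvm_for_each_vcpu(i, v, kvm)
 *		kvm_vcpu_kick(v);	(poke every online vcpu)
 */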

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
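
/*
 * Illustrative read side (editor's sketch, not from the original header):
 * readers dereference kvm->memslots under SRCU, while writers swap the
 * pointer under slots_lock and then synchronize:
 *
 *	idx = srcu_read_lock(&kvm->srcu);
 *	slots = kvm_memslots(kvm);
 *	... look up a slot ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */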

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);
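
/*
 * Illustrative use (editor's sketch, not from the original header):
 * copying a value into guest memory and back through the gpa-based
 * helpers; both return 0 on success and non-zero on failure:
 *
 *	if (kvm_write_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */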

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	account_system_vtime(current);
	current->flags |= PF_VCPU;
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition a CPU may stay in a guest mode for quite a long time (up
	 * to one time slice). Let's treat guest mode as a quiescent state,
	 * just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
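
/*
 * Illustrative vcpu-loop usage (editor's sketch, not from the original
 * header): guest entry/exit is bracketed with preemption disabled, since
 * kvm_guest_enter() BUG()s if called while preemptible:
 *
 *	preempt_disable();
 *	kvm_guest_enter();
 *	... actually enter the guest (arch-specific) ...
 *	kvm_guest_exit();
 *	preempt_enable();
 */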

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
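
/*
 * Worked example (editor's addition): with 4 KiB pages (PAGE_SHIFT == 12),
 * gfn_to_gpa(0x12345) == 0x12345000 and gpa_to_gfn(0x12345678) == 0x12345;
 * the low 12 bits of a gpa are the offset within the page and are simply
 * truncated by gpa_to_gfn().
 */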

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under the mmu_lock and both values are
	 * modified under mmu_lock, so there's no need for an smp_rmb()
	 * here in between; otherwise mmu_notifier_count would have to be
	 * read before mmu_notifier_seq, see the
	 * mmu_notifier_invalidate_range_end write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
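
/*
 * Illustrative fault-path pattern (editor's sketch, not from the original
 * header): sample the sequence count before resolving the pfn, then retry
 * under mmu_lock if an invalidation raced with the lookup:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	(drop the pfn and retry the fault)
 */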

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
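
/*
 * Illustrative consumer loop (editor's sketch, not from the original
 * header): arch code typically drains vcpu->requests on the way into the
 * guest, pairing each KVM_REQ_* bit with its handler:
 *
 *	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 *		kvm_mmu_unload(vcpu);	(x86 example handler)
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		... flush the guest TLB (arch-specific) ...
 */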

#endif