#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_KVMCLOCK_UPDATE    8
#define KVM_REQ_KICK               9
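
/*
 * A request bit is raised with set_bit() and consumed on the vcpu run
 * path with test_and_clear_bit().  Illustrative sketch only; the
 * consumer shown is hypothetical, arch code supplies the real one:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);
 */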

#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int                   dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
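
/*
 * Illustrative flow (a sketch, not the only way to use the bus): a
 * device is registered once and then looked up per guest access on the
 * MMIO/PIO exit path; "mydev" below is hypothetical:
 *
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &mydev->iodev);
 *	...
 *	dev = kvm_io_bus_find_dev(&kvm->mmio_bus, addr, len, is_write);
 *	if (dev)
 *		... dispatch the access through dev's ops ...
 *
 * Real users include the in-kernel PIT/PIC (pio_bus) and the ioapic and
 * coalesced-MMIO devices (mmio_bus).
 */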

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	struct mutex mutex;
	int   cpu;
	struct kvm_run *run;
	unsigned long requests;
	unsigned long guest_debug;
	int fpu_active;
	int guest_fpu_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

	struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct {
		unsigned long rmap_pde;
		int write_count;
	} *lpage_info;
	unsigned long userspace_addr;
	int user_alloc;
};
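
/*
 * A slot maps the guest frame range [base_gfn, base_gfn + npages) onto
 * the userspace mapping that starts at userspace_addr, so the lookup
 * done by gfn_to_hva() below boils down to (sketch; the real helper
 * also validates the gfn against the slot):
 *
 *	hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 */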

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		    struct kvm *kvm, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct list_head link;
};

struct kvm {
	struct mutex lock; /* protects the vcpus array and APIC accesses */
	spinlock_t mmu_lock;
	spinlock_t requests_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
	struct hlist_head mask_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
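
/*
 * Typical access pattern (illustrative; "val" and its type are made up).
 * kvm_write_guest() marks the touched pages dirty itself, so the caller
 * needs no extra mark_page_dirty() call:
 *
 *	struct foo val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 *	val.counter++;
 *	if (kvm_write_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */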

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
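
/*
 * Registration sketch (the callback name is hypothetical; the in-kernel
 * PIT and assigned devices follow this shape, using
 * kvm_register_irq_ack_notifier() declared further down):
 *
 *	kian->gsi = gsi;
 *	kian->irq_acked = my_irq_acked;
 *	kvm_register_irq_ack_notifier(kvm, kian);
 */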

#define KVM_ASSIGNED_MSIX_PENDING		0x1
struct kvm_guest_msix_entry {
	u32 vector;
	u16 entry;
	u16 flags;
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct work_struct interrupt_work;
	struct list_head list;
	int assigned_dev_id;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct kvm_guest_msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t assigned_dev_lock;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
			unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      gfn_t base_gfn,
				      unsigned long npages)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
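
/*
 * These bracket the actual guest entry so that time spent running the
 * guest is accounted as guest time.  Arch run loops use them roughly
 * like this (sketch only):
 *
 *	kvm_guest_enter();
 *	... enter and run the guest until it exits ...
 *	kvm_guest_exit();
 */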

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
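
/*
 * Worked example: with 4K pages (PAGE_SHIFT == 12), gfn_to_gpa(0x100)
 * yields gpa 0x100000; pfn_to_hpa() applies the same shift on the host
 * side.
 */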

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
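
/*
 * Arch code defines the debugfs_entries[] table; each entry names a
 * counter in kvm_vm_stat or kvm_vcpu_stat by its offset.  Illustrative
 * shape (x86 wraps the offsetof in VM_STAT/VCPU_STAT helper macros):
 *
 *	struct kvm_stats_debugfs_item debugfs_entries[] = {
 *		{ "pf_fixed",
 *		  offsetof(struct kvm_vcpu, stat.pf_fixed), KVM_STAT_VCPU },
 *		{ NULL }
 *	};
 */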

#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 5, d1, d2, d3, d4, d5)
#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 4, d1, d2, d3, d4, 0)
#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 3, d1, d2, d3, 0, 0)
#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 2, d1, d2, 0, 0, 0)
#define KVMTRACE_1D(evt, vcpu, d1, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 1, d1, 0, 0, 0, 0)
#define KVMTRACE_0D(evt, vcpu, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 0, 0, 0, 0, 0, 0)
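
/*
 * The evt argument is pasted onto KVM_TRC_ and name onto kvm_trace_, so
 * a call such as (illustrative)
 *
 *	KVMTRACE_2D(PAGE_FAULT, vcpu, error_code, address, handler);
 *
 * fires the kvm_trace_handler marker with event id KVM_TRC_PAGE_FAULT
 * and two payload words.
 */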

#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif /* CONFIG_KVM_TRACE */

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under mmu_lock and both values are modified
	 * under mmu_lock, so there is no need for an smp_rmb() in
	 * between; otherwise mmu_notifier_count would have to be read
	 * before mmu_notifier_seq.  See the write side in
	 * mmu_notifier_invalidate_range_end.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */
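
/*
 * Canonical use of mmu_notifier_retry() (a sketch of the x86 page-fault
 * path): sample the sequence count outside mmu_lock, do the work that
 * may sleep, then revalidate under mmu_lock before committing:
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);	(may sleep)
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;		(drop the pfn, retry the fault)
 *	... install the mapping ...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */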

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif /* CONFIG_HAVE_KVM_IRQCHIP */

#endif /* __KVM_HOST_H */