#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_KVMCLOCK_UPDATE    8
#define KVM_REQ_KICK               9

#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
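
/*
 * Illustrative sketch (not code from this header): a device emulation
 * would typically register itself on one of the per-VM buses at creation
 * time and is later looked up on the I/O exit path, roughly:
 *
 *	kvm_io_bus_register_dev(&kvm->pio_bus, &my_dev->dev);
 *	...
 *	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
 *	if (dev)
 *		kvm_iodevice_write(dev, addr, len, val);
 *
 * Here "my_dev" is a made-up struct kvm_io_device container and
 * kvm_iodevice_write() is assumed from the companion iodev header.
 */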

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	struct mutex mutex;
	int cpu;
	struct kvm_run *run;
	unsigned long requests;
	unsigned long guest_debug;
	int fpu_active;
	int guest_fpu_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

	struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct {
		unsigned long rmap_pde;
		int write_count;
	} *lpage_info;
	unsigned long userspace_addr;
	int user_alloc;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct list_head link;
};

struct kvm {
	struct mutex lock; /* protects the vcpus array and APIC accesses */
	spinlock_t mmu_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
	struct hlist_head mask_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
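
/*
 * Illustrative sketch (not part of the original header): copying a small
 * guest-physical structure in and out with the accessors above.  The gpa
 * value and the hypothetical struct my_state are made up for the example;
 * both calls return 0 on success or a negative error such as -EFAULT.
 *
 *	struct my_state state;
 *
 *	if (kvm_read_guest(kvm, gpa, &state, sizeof(state)))
 *		return -EFAULT;
 *	state.flags |= 1;
 *	if (kvm_write_guest(kvm, gpa, &state, sizeof(state)))
 *		return -EFAULT;
 */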

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
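
/*
 * Illustrative sketch (an assumption about typical callers, not code from
 * this header): cross-vcpu work is queued by setting one of the KVM_REQ_*
 * bits defined at the top of this file and then kicking the target vcpu
 * out of guest mode, roughly:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	kvm_vcpu_kick(vcpu);
 *
 * The vcpu's run loop is then expected to test_and_clear_bit() the request
 * before re-entering the guest.
 */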

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

#define KVM_ASSIGNED_MSIX_PENDING	0x1
struct kvm_guest_msix_entry {
	u32 vector;
	u16 entry;
	u16 flags;
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct work_struct interrupt_work;
	struct list_head list;
	int assigned_dev_id;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct kvm_guest_msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t assigned_dev_lock;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
			unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      gfn_t base_gfn,
				      unsigned long npages)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
				    struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
				      struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
		   vcpu, 5, d1, d2, d3, d4, d5)
#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
		   vcpu, 4, d1, d2, d3, d4, 0)
#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
		   vcpu, 3, d1, d2, d3, 0, 0)
#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
		   vcpu, 2, d1, d2, 0, 0, 0)
#define KVMTRACE_1D(evt, vcpu, d1, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
		   vcpu, 1, d1, 0, 0, 0, 0)
#define KVMTRACE_0D(evt, vcpu, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
		   vcpu, 0, 0, 0, 0, 0, 0)

#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under mmu_lock and both values are modified
	 * under mmu_lock, so there's no need for smp_rmb() in between;
	 * otherwise mmu_notifier_count would have to be read before
	 * mmu_notifier_seq.  See the write side in
	 * mmu_notifier_invalidate_range_end().
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
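
/*
 * Illustrative caller pattern (an assumption about how arch page-fault
 * code is expected to use mmu_notifier_retry(); not code from this
 * header): sample mmu_notifier_seq before translating the gfn, then
 * recheck under mmu_lock before installing the mapping, and retry the
 * fault if a notifier invalidation ran in between.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;
 *	...install the mapping...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */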

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#endif