#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13

#define KVM_USERSPACE_IRQ_SOURCE_ID 0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice. At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
        int dev_count;
#define NR_IOBUS_DEVS 200
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                            struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
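
/*
 * Example (illustrative only, not part of the original API docs):
 * dispatching an MMIO store through the in-kernel MMIO bus.  The
 * device claiming [addr, addr + len) has its write callback invoked;
 * a nonzero return typically means no in-kernel device claimed the
 * range and the access must be completed in userspace.
 *
 *	u32 val = 0xdeadbeef;	// hypothetical payload
 *	if (kvm_io_bus_write(kvm, KVM_MMIO_BUS, gpa, sizeof(val), &val))
 *		;		// fall back to a userspace MMIO exit
 */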

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        int mmio_index;
        unsigned char mmio_data[KVM_MMIO_SIZE];
        gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps;
 * this number must be chosen so as not to exceed those limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
        unsigned long rmap_pde;
        int write_count;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int user_alloc;
        int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
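
/*
 * Worked example: on a 64-bit host (BITS_PER_LONG == 64), a slot with
 * npages == 33 needs ALIGN(33, 64) == 64 bits, i.e. 64 / 8 == 8 bytes
 * of dirty bitmap: one bit per page, rounded up to a whole long.
 */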

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains a list of the irq
         * chips the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

struct kvm_memslots {
        int nmemslots;
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
        struct kvm_vcpu *bsp_vcpu;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)                                       \
 do {                                                                   \
        if (printk_ratelimit())                                         \
                printk(KERN_ERR "kvm: %i: cpu%i " fmt,                  \
                       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern struct page *fault_page;

extern pfn_t bad_pfn;
extern pfn_t fault_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
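
/*
 * Example (illustrative only): iterating over all online vcpus, e.g.
 * to post a request and kick each one out of guest mode.
 *
 *	int i;
 *	struct kvm_vcpu *v;
 *
 *	kvm_for_each_vcpu(i, v, kvm) {
 *		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
 *		kvm_vcpu_kick(v);
 *	}
 */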
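
/*
 * Example (illustrative sketch): gfn_to_page() returns a referenced
 * page (bad_page on failure, so test with is_error_page()); every
 * successful lookup must be paired with a release, dirty or clean.
 *
 *	struct page *page = gfn_to_page(kvm, gfn);
 *	if (is_error_page(page))
 *		return -EFAULT;
 *	// ... touch the page contents ...
 *	kvm_release_page_dirty(page);	// or _clean if not written
 */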

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);
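
/*
 * Example (illustrative sketch): copying a guest structure in, then
 * updating it through a gfn_to_hva_cache.  "struct my_data" and "gpa"
 * are hypothetical; both accessors return 0 on success, negative on
 * error.
 *
 *	struct my_data d;		// hypothetical guest ABI struct
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_read_guest(kvm, gpa, &d, sizeof(d)))
 *		return -EFAULT;
 *	d.flags |= 1;
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
 *		return -EFAULT;
 *	kvm_write_guest_cached(kvm, &ghc, &d, sizeof(d));
 */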

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);
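
/*
 * Example (illustrative sketch): a component that wants to observe the
 * guest masking/unmasking an irqchip pin embeds a notifier and
 * registers it for that irq.  "struct my_dev" and "my_mask_cb" are
 * hypothetical names.
 *
 *	static void my_mask_cb(struct kvm_irq_mask_notifier *kimn, bool masked)
 *	{
 *		struct my_dev *dev = container_of(kimn, struct my_dev, kimn);
 *		// react to the mask state change
 *	}
 *
 *	dev->kimn.func = my_mask_cb;
 *	kvm_register_irq_mask_notifier(kvm, irq, &dev->kimn);
 */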

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY 0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                                    struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
        BUG_ON(preemptible());
        account_system_vtime(current);
        current->flags |= PF_VCPU;
        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode. In fact switching to a guest mode
         * is very similar to exiting to userspace from the rcu point of
         * view. In addition a CPU may stay in guest mode for quite a long
         * time (up to one time slice). Let's treat guest mode as a quiescent
         * state, just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}
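
/*
 * Example (illustrative sketch of an arch vcpu_run loop, details vary
 * by architecture): enter with preemption disabled, run the guest,
 * then exit before re-enabling preemption.
 *
 *	preempt_disable();
 *	kvm_guest_enter();
 *	// ... arch-specific world switch into the guest ...
 *	kvm_guest_exit();
 *	preempt_enable();
 */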

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Both reads happen under the mmu_lock and both values are
         * modified under mmu_lock, so there's no need for smp_rmb()
         * here in between; otherwise mmu_notifier_count would have to
         * be read before mmu_notifier_seq, see the
         * mmu_notifier_invalidate_range_end write side.
         */
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
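
/*
 * Example (illustrative sketch): the canonical retry pattern around a
 * page fault.  The sequence count is sampled before the (possibly
 * sleeping) pfn lookup; if an mmu notifier invalidation ran in
 * between, mmu_notifier_retry() reports it and the fault is replayed.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto retry;	// drop the lock, release the pfn, try again
 *	// ... install the mapping ...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */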
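
/*
 * Example (illustrative only): requests are posted from any context
 * with kvm_make_request(), usually followed by kvm_vcpu_kick(), and
 * consumed exactly once, in vcpu context, with kvm_check_request().
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		;	// arch-specific TLB flush goes here
 */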

#endif