#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS  2

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 52-62 to indicate an error pfn,
 * and mask bit 63 to indicate the no-slot pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host failed to
 * translate it to a pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be
 * translated to a pfn - either it is not in a slot or
 * translating it to a pfn failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in a slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
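/*
 * A minimal sketch of how callers typically distinguish these pfn classes
 * after a translation attempt (the handle_noslot()/handle_fault() helpers
 * are hypothetical, for illustration only):
 *
 *      pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *      if (is_noslot_pfn(pfn))
 *              return handle_noslot(gfn);   // gfn not backed by any memslot
 *      if (is_error_pfn(pfn))
 *              return handle_fault(gfn);    // in a slot, but translation failed
 *      // otherwise pfn is a real host page frame number
 */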
/*
 * Architectures with a KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH           0
#define KVM_REQ_MIGRATE_TIMER       1
#define KVM_REQ_REPORT_TPR_ACCESS   2
#define KVM_REQ_MMU_RELOAD          3
#define KVM_REQ_TRIPLE_FAULT        4
#define KVM_REQ_PENDING_TIMER       5
#define KVM_REQ_UNHALT              6
#define KVM_REQ_MMU_SYNC            7
#define KVM_REQ_CLOCK_UPDATE        8
#define KVM_REQ_KICK                9
#define KVM_REQ_DEACTIVATE_FPU     10
#define KVM_REQ_EVENT              11
#define KVM_REQ_APF_HALT           12
#define KVM_REQ_STEAL_UPDATE       13
#define KVM_REQ_NMI                14
#define KVM_REQ_PMU                15
#define KVM_REQ_PMI                16
#define KVM_REQ_WATCHDOG           17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS  19
#define KVM_REQ_EPR_EXIT           20
#define KVM_REQ_SCAN_IOAPIC        21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
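/*
 * A hedged sketch of how a device typically lands on a bus (my_mmio_ops,
 * MY_DEV_GPA and MY_DEV_LEN are made-up names; callers hold
 * kvm->slots_lock around registration):
 *
 *      struct kvm_io_device dev;
 *
 *      kvm_iodevice_init(&dev, &my_mmio_ops);
 *      mutex_lock(&kvm->slots_lock);
 *      ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
 *                                    MY_DEV_GPA, MY_DEV_LEN, &dev);
 *      mutex_unlock(&kvm->slots_lock);
 *
 * Accesses to [MY_DEV_GPA, MY_DEV_GPA + MY_DEV_LEN) are then routed to
 * my_mmio_ops via kvm_io_bus_read()/kvm_io_bus_write().
 */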
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * Cpu relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or cpu relax intercepted.
         * dy_eligible: indicates whether vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
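/*
 * Worked example: on a 64-bit host (BITS_PER_LONG == 64), a 1 GiB slot
 * with 4 KiB pages has npages = 262144, which is already a multiple of
 * 64, so kvm_dirty_bitmap_bytes() returns 262144 / 8 = 32768 bytes -
 * one bit per guest page.
 */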
struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

#define kvm_for_each_memslot(memslot, slots)    \
        for (memslot = &slots->memslots[0];     \
              memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
                memslot++)
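/*
 * A minimal usage sketch, simplified for illustration: iterating every
 * online vcpu, e.g. to kick each one out of guest mode:
 *
 *      int idx;
 *      struct kvm_vcpu *vcpu;
 *
 *      kvm_for_each_vcpu(idx, vcpu, kvm)
 *              kvm_vcpu_kick(vcpu);
 */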
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}
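/*
 * A sketch of the usual reader pattern: memslots are protected by
 * kvm->srcu, so lookups happen inside an SRCU read-side section
 * (slot_id is a made-up variable name):
 *
 *      int idx = srcu_read_lock(&kvm->srcu);
 *      struct kvm_memslots *slots = kvm_memslots(kvm);
 *      struct kvm_memory_slot *slot = id_to_memslot(slots, slot_id);
 *
 *      ... use slot ...
 *      srcu_read_unlock(&kvm->srcu, idx);
 */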
/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
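/*
 * A hedged sketch of the cached-access pattern: the cache is initialized
 * once against a fixed gpa and then reused on the hot path, avoiding a
 * full memslot lookup per access (my_data and MY_GPA are made-up names):
 *
 *      struct gfn_to_hva_cache ghc;
 *
 *      if (kvm_gfn_to_hva_cache_init(kvm, &ghc, MY_GPA, sizeof(my_data)))
 *              return -EFAULT;
 *      ...
 *      kvm_write_guest_cached(kvm, &ghc, &my_data, sizeof(my_data));
 */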
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                        bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
        return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
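/*
 * A minimal sketch of an ack-notifier consumer (my_ack_fn and my_gsi are
 * made-up names): the callback fires when the guest acknowledges the
 * interrupt on the given gsi, and the notifier is unregistered with
 * kvm_unregister_irq_ack_notifier() on teardown.
 *
 *      static void my_ack_fn(struct kvm_irq_ack_notifier *kian)
 *      {
 *              ... re-enable or re-inject as needed ...
 *      }
 *
 *      struct kvm_irq_ack_notifier kian = {
 *              .gsi = my_gsi,
 *              .irq_acked = my_ack_fn,
 *      };
 *      kvm_register_irq_ack_notifier(kvm, &kian);
 */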
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
        unsigned long flags;

        BUG_ON(preemptible());

        local_irq_save(flags);
        guest_enter();
        local_irq_restore(flags);

        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode. In fact switching to a guest mode
         * is very similar to exiting to userspace from the rcu point of view.
         * In addition a CPU may stay in guest mode for quite a long time (up
         * to one time slice). Let's treat guest mode as a quiescent state,
         * just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit();
        local_irq_restore(flags);
}
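/*
 * A simplified, hedged sketch of how an arch run loop brackets guest
 * execution (details vary per architecture; preemption must already be
 * disabled here, which is what BUG_ON(preemptible()) above asserts):
 *
 *      preempt_disable();
 *      kvm_guest_enter();
 *      ... switch into the guest and run until a vmexit ...
 *      kvm_guest_exit();
 *      preempt_enable();
 */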
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
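/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), gpa_to_gfn(0x12345678)
 * yields gfn 0x12345, and gfn_to_gpa(0x12345) gives back the page-aligned
 * gpa 0x12345000; the low 12 bits of the gpa are the offset within the page.
 */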
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so
         * can't rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
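/*
 * A sketch of the pattern page-fault handlers use with this helper:
 * sample the sequence count before translating the gfn, then re-check
 * under mmu_lock before installing the mapping, retrying the fault if
 * an invalidation raced with the translation:
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);
 *      ...
 *      spin_lock(&kvm->mmu_lock);
 *      if (mmu_notifier_retry(kvm, mmu_seq))
 *              goto retry;             // drop pfn and take the fault again
 *      ... install the mapping ...
 *      spin_unlock(&kvm->mmu_lock);
 */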
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_vfio_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif