#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62 ~ 52 to indicate an error pfn,
 * and bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the translation
 * to a host pfn failed.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a
 * pfn - either it is not in any slot, or the translation failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
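
/*
 * Illustrative sketch, not part of the upstream header: how a caller
 * might classify the result of a gfn->pfn translation with the
 * predicates above.  The function name and error mapping are
 * hypothetical.
 */
static inline int example_classify_pfn(pfn_t pfn)
{
        if (is_noslot_pfn(pfn))
                return -ENOENT;         /* gfn is not backed by a memslot */
        if (is_error_pfn(pfn))
                return -EFAULT;         /* in a slot, but translation failed */
        return 0;                       /* valid host pfn */
}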

/*
 * Architectures with a KVM_HVA_ERR_BAD other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH		0
#define KVM_REQ_MIGRATE_TIMER		1
#define KVM_REQ_REPORT_TPR_ACCESS	2
#define KVM_REQ_MMU_RELOAD		3
#define KVM_REQ_TRIPLE_FAULT		4
#define KVM_REQ_PENDING_TIMER		5
#define KVM_REQ_UNHALT			6
#define KVM_REQ_MMU_SYNC		7
#define KVM_REQ_CLOCK_UPDATE		8
#define KVM_REQ_KICK			9
#define KVM_REQ_DEACTIVATE_FPU		10
#define KVM_REQ_EVENT			11
#define KVM_REQ_APF_HALT		12
#define KVM_REQ_STEAL_UPDATE		13
#define KVM_REQ_NMI			14
#define KVM_REQ_PMU			15
#define KVM_REQ_PMI			16
#define KVM_REQ_WATCHDOG		17
#define KVM_REQ_MASTERCLOCK_UPDATE	18
#define KVM_REQ_MCLOCK_INPROGRESS	19
#define KVM_REQ_EPR_EXIT		20
#define KVM_REQ_SCAN_IOAPIC		21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	22

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                           int len, void *val, long cookie);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
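
/*
 * Illustrative sketch, not upstream code: dispatching an emulated guest
 * store onto the MMIO bus.  kvm_io_bus_write() returns 0 when a
 * registered device claimed the access.  The function name is
 * hypothetical.
 */
static inline bool example_mmio_store(struct kvm *kvm, gpa_t gpa,
                                      const void *val, int len)
{
        return kvm_io_bus_write(kvm, KVM_MMIO_BUS, gpa, len, val) == 0;
}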

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * CPU-relax intercept / pause-loop-exit optimization.
         * in_spin_loop: set when a vcpu takes a pause loop exit or has
         *  its cpu-relax instruction intercepted.
         * dy_eligible: indicates whether the vcpu is eligible for
         *  directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
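
/*
 * Illustrative sketch, not upstream code: the remote side of the
 * request/kick protocol built on the mode values above.  Posting the
 * request must happen before the kick, so a vcpu observed in
 * IN_GUEST_MODE is forced out and rechecks vcpu->requests before
 * re-entering.  The function name is hypothetical; kvm_vcpu_kick() is
 * declared later in this header, so it is redeclared here.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

static inline void example_post_request(struct kvm_vcpu *vcpu, int req)
{
        set_bit(req, &vcpu->requests);  /* what kvm_make_request() does */
        kvm_vcpu_kick(vcpu);            /* IPI the cpu if in guest mode */
}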

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be chosen so as not to exceed those limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
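
/*
 * Illustrative sketch, not upstream code: typical use of the logging
 * helpers above when the guest touches something unimplemented.  The
 * function name and MSR context are hypothetical.
 */
static inline void example_warn_unhandled_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
}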

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

#define kvm_for_each_memslot(memslot, slots) \
        for (memslot = &slots->memslots[0]; \
             memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
             memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
                     u64 last_generation);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);
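
/*
 * Illustrative sketch, not upstream code: iterating the online vcpus
 * with kvm_for_each_vcpu().  The function name is hypothetical.
 */
static inline struct kvm_vcpu *example_find_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu;
        int idx;

        kvm_for_each_vcpu(idx, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}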

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);
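
/*
 * Illustrative sketch, not upstream code: kvm_read_guest() copies from
 * guest physical memory and returns 0 on success or -EFAULT on a bad
 * address.  The wrapper below is hypothetical.
 */
static inline int example_read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
{
        return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}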

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif
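
/*
 * Illustrative sketch, not upstream code: an architecture that defines
 * __KVM_HAVE_ARCH_VM_ALLOC supplies its own allocation pair instead of
 * the kzalloc()-based defaults above, e.g. if its struct kvm is too
 * large for kmalloc():
 *
 *	static inline struct kvm *kvm_arch_alloc_vm(void)
 *	{
 *		return vzalloc(sizeof(struct kvm));
 *	}
 *
 *	static inline void kvm_arch_free_vm(struct kvm *kvm)
 *	{
 *		vfree(kvm);
 *	}
 */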

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}
#endif
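
/*
 * Illustrative sketch, not upstream code: pulsing a guest irq line from
 * a device model, using an irq source id obtained from
 * kvm_request_irq_source_id().  The function name is hypothetical.
 */
static inline void example_pulse_irq(struct kvm *kvm, int irq_source_id,
                                     u32 gsi)
{
        kvm_set_irq(kvm, irq_source_id, gsi, 1, false); /* assert */
        kvm_set_irq(kvm, irq_source_id, gsi, 0, false); /* deassert */
}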

static inline void kvm_guest_enter(void)
{
        unsigned long flags;

        BUG_ON(preemptible());

        local_irq_save(flags);
        guest_enter();
        local_irq_restore(flags);

        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode. In fact switching to a guest mode
         * is very similar to exiting to userspace from the rcu point of view.
         * In addition the CPU may stay in a guest mode for quite a long time
         * (up to one time slice). Let's treat guest mode as a quiescent state,
         * just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit();
        local_irq_restore(flags);
}

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
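
/*
 * Illustrative sketch, not upstream code: the gfn/gpa helpers above are
 * plain shifts by PAGE_SHIFT, so a gpa can be rounded down to its page
 * base with a gfn round trip.  The function name is hypothetical.
 */
static inline gpa_t example_gpa_page_base(gpa_t gpa)
{
        return gfn_to_gpa(gpa_to_gfn(gpa));
}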

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so
         * can't rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};
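
/*
 * Illustrative sketch, not upstream code: how a run loop consumes a
 * request posted with kvm_make_request().  kvm_check_request() tests
 * and clears the bit; the arch-specific handling itself is left to the
 * caller here.  The function name is hypothetical.
 */
static inline bool example_tlb_flush_pending(struct kvm_vcpu *vcpu)
{
        return kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu);
}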

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
        return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif