#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate an error pfn,
 * and mask bit 63 to indicate a no-slot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the translation
 * to a host pfn failed.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be translated to
 * a pfn - either the gfn is not in any slot, or the translation to
 * a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfns indicate that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures whose KVM_HVA_ERR_BAD is other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
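/*
 * Illustrative only (not part of the original header): a hedged sketch of
 * how a caller might classify the result of gfn_to_pfn() using the helpers
 * above. The helper name example_handle_gfn() is hypothetical; gfn_to_pfn()
 * and kvm_release_pfn_clean() are declared further down in this header.
 */
#if 0
static int example_handle_gfn(struct kvm *kvm, gfn_t gfn)
{
	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_noslot_pfn(pfn))
		return -ENOENT;	/* gfn is not backed by any memslot */
	if (is_error_pfn(pfn))
		return -EFAULT;	/* in a slot, but translation failed */

	/* ... use pfn ... */
	kvm_release_pfn_clean(pfn);
	return 0;
}
#endif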
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	0
#define KVM_REQ_MMU_RELOAD	1
#define KVM_REQ_PENDING_TIMER	2
#define KVM_REQ_UNHALT		3

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	unsigned char fpu_counter;
	struct swait_queue_head wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};
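/*
 * Illustrative only: a hedged sketch of registering an in-kernel MMIO
 * device on KVM_MMIO_BUS. The function name and the gpa/len values are
 * hypothetical; real callers (e.g. ioeventfd registration) hold
 * kvm->slots_lock around bus updates, as sketched here.
 */
#if 0
static int example_register_mmio_dev(struct kvm *kvm,
				     struct kvm_io_device *dev)
{
	int ret;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      0xfee00000, 0x1000, dev);
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
#endif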
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};
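/*
 * Illustrative only: kvm_dirty_bitmap_bytes() rounds npages up to a
 * multiple of BITS_PER_LONG and converts bits to bytes, e.g.
 * npages = 100 -> ALIGN(100, 64) = 128 bits -> 16 bytes. A hedged sketch
 * of sizing a slot's dirty bitmap with it (kvm_kvzalloc() is declared
 * further down in this header; the helper name is hypothetical):
 */
#if 0
static int example_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;
	return 0;
}
#endif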
struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
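/*
 * Illustrative only: a hedged sketch of walking all online vcpus with
 * kvm_for_each_vcpu(), here to kick each one out of guest mode
 * (kvm_vcpu_kick() is declared further down in this header; the helper
 * name is hypothetical).
 */
#if 0
static void example_kick_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}
#endif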
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
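/*
 * Illustrative only: memslots are protected by kvm->srcu (see the
 * rcu_dereference_check() condition in __kvm_memslots() above), so readers
 * bracket accesses with srcu_read_lock()/srcu_read_unlock(). A hedged
 * sketch of looking up a slot by id under the read lock:
 */
#if 0
static void example_peek_memslot(struct kvm *kvm, int slot_id)
{
	struct kvm_memory_slot *slot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	slot = id_to_memslot(kvm_memslots(kvm), slot_id);
	/* ... read slot->base_gfn, slot->npages, etc. ... */
	srcu_read_unlock(&kvm->srcu, idx);
}
#endif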
/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
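/*
 * Illustrative only: a hedged sketch of copying a small value out of and
 * back into guest memory with kvm_read_guest()/kvm_write_guest(). Writes
 * mark the page dirty internally, so dirty logging keeps working. The
 * helper name and the u32 payload are hypothetical.
 */
#if 0
static int example_bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
	u32 counter;
	int ret;

	ret = kvm_read_guest(kvm, gpa, &counter, sizeof(counter));
	if (ret)
		return ret;

	counter++;
	return kvm_write_guest(kvm, gpa, &counter, sizeof(counter));
}
#endif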
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);
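/*
 * Illustrative only: a hedged sketch of how an architecture might build
 * its kvm_vm_ioctl_get_dirty_log() on top of kvm_get_dirty_log_protect(),
 * flushing remote TLBs if any page was dirty so write-protection takes
 * effect. The function name is hypothetical.
 */
#if 0
int example_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);
	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
#endif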
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}
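/*
 * Illustrative only: a hedged sketch of waking a vcpu that may be blocked
 * in kvm_vcpu_block(), using the swait queue returned by
 * kvm_arch_vcpu_wq(). kvm_vcpu_kick() does something similar internally;
 * the helper name here is hypothetical.
 */
#if 0
static void example_wake_vcpu(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wqp = kvm_arch_vcpu_wq(vcpu);

	if (swait_active(wqp))
		swake_up(wqp);
}
#endif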
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated, so this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

/* must be called with irqs disabled */
static inline void __kvm_guest_enter(void)
{
	guest_enter();
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Let's treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

/* must be called with irqs disabled */
static inline void __kvm_guest_exit(void)
{
	guest_exit();
}

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_enter();
	local_irq_restore(flags);
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_exit();
	local_irq_restore(flags);
}
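/*
 * Illustrative only: a hedged sketch of the bracketing an arch vcpu run
 * loop might use around the world switch. __kvm_guest_enter() and
 * __kvm_guest_exit() require interrupts to be disabled, which is why the
 * plain kvm_guest_enter()/kvm_guest_exit() wrappers save and restore flags.
 */
#if 0
static void example_run_guest_once(struct kvm_vcpu *vcpu)
{
	local_irq_disable();
	__kvm_guest_enter();

	/* ... arch-specific world switch into the guest ... */

	__kvm_guest_exit();
	local_irq_enable();
}
#endif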
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
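/*
 * Illustrative only: a hedged sketch of the retry pattern an arch page
 * fault handler typically builds around mmu_notifier_retry(). The sequence
 * number is sampled before translating the gfn; if an invalidation ran in
 * between, the now-stale pfn is dropped and the fault is retried. The
 * helper name and the map-into-shadow step are hypothetical.
 */
#if 0
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();	/* order the sequence read before the translation */

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* an invalidation raced with us: drop the pfn and retry */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;
	}
	/* ... install the gfn -> pfn mapping in the shadow/stage-2 MMU ... */
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
#endif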
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 /* FIXME: we can have more than that... */
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);

		/*
		 * Ensure the rest of the request is visible to
		 * kvm_check_request's caller.  Paired with the smp_wmb
		 * in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
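/*
 * Illustrative only: a hedged sketch of the producer/consumer pairing for
 * vcpu requests. A producer raises a request bit and kicks the vcpu; the
 * vcpu's run loop consumes it with kvm_check_request() before re-entering
 * the guest. Both helper names are hypothetical.
 */
#if 0
/* Producer side, possibly running on another CPU: */
static void example_post_timer_request(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

/* Consumer side, in the vcpu run loop: */
static void example_service_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		;	/* arch-specific local TLB flush would go here */
	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		;	/* arch-specific timer handling would go here */
}
#endif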
extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#endif