Lines matching refs: kvm

187 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
189 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
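A minimal sketch of how the request API above pairs up: one side raises a request bit on every vCPU, and each vCPU consumes it on its next run-loop pass. KVM_REQ_TLB_FLUSH and kvm_check_request() are real; the surrounding functions are hypothetical.

static void example_flush_all(struct kvm *kvm)
{
        /* sets KVM_REQ_TLB_FLUSH on every vCPU and kicks those in guest mode */
        kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
}

static void example_service_requests(struct kvm_vcpu *vcpu)
{
        /* test-and-clear with acquire semantics; each request fires once */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                ; /* arch-specific TLB flush would go here */
}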
226 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
228 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
230 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
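A sketch, assuming a hypothetical MMIO device, of how a struct kvm_io_device gets attached to a bus. kvm_iodevice_init() comes from virt/kvm/iodev.h, and callers of kvm_io_bus_register_dev() hold kvm->slots_lock.

static int example_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, void *val)
{
        memset(val, 0, len);            /* device-specific read */
        return 0;                       /* 0 = access handled */
}

static const struct kvm_io_device_ops example_mmio_ops = {
        .read = example_mmio_read,
};

static int example_attach(struct kvm *kvm, struct kvm_io_device *dev,
                          gpa_t addr, int len)
{
        int ret;

        kvm_iodevice_init(dev, &example_mmio_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
        mutex_unlock(&kvm->slots_lock);
        return ret;
}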
272 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
273 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
274 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
324 struct kvm *kvm; member
667 struct kvm *kvm, int irq_source_id, int level,
700 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
710 static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm) in kvm_arch_nr_memslot_as_ids() argument
726 static inline bool kvm_arch_has_private_mem(struct kvm *kvm) in kvm_arch_has_private_mem() argument
733 static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) in kvm_arch_has_readonly_mem() argument
756 struct kvm { struct
894 static inline void kvm_vm_dead(struct kvm *kvm) in kvm_vm_dead() argument
896 kvm->vm_dead = true; in kvm_vm_dead()
897 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD); in kvm_vm_dead()
900 static inline void kvm_vm_bugged(struct kvm *kvm) in kvm_vm_bugged() argument
902 kvm->vm_bugged = true; in kvm_vm_bugged()
903 kvm_vm_dead(kvm); in kvm_vm_bugged()
907 #define KVM_BUG(cond, kvm, fmt...) \ argument
911 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
912 kvm_vm_bugged(kvm); \
916 #define KVM_BUG_ON(cond, kvm) \ argument
920 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
921 kvm_vm_bugged(kvm); \
933 #define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \ argument
939 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
940 kvm_vm_bugged(kvm); \
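Because KVM_BUG_ON() evaluates to its condition, a caller can mark the whole VM bugged and bail out in a single expression. A sketch with a hypothetical invariant:

static int example_check(struct kvm *kvm, gpa_t addr)
{
        /* hypothetical invariant; on failure the VM is marked dead */
        if (KVM_BUG_ON(!IS_ALIGNED(addr, PAGE_SIZE), kvm))
                return -EIO;
        return 0;
}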
950 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_srcu_read_lock()
955 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx); in kvm_vcpu_srcu_read_unlock()
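Memslot-derived state (buses, memslots, hva translations) is protected by kvm->srcu, so guest-memory accesses on the vCPU path sit inside the read-side critical section. A sketch with a hypothetical gpa:

static int example_read_u64(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *val)
{
        int r;

        kvm_vcpu_srcu_read_lock(vcpu);
        r = kvm_read_guest(vcpu->kvm, gpa, val, sizeof(*val));
        kvm_vcpu_srcu_read_unlock(vcpu);
        return r;
}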
963 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) in kvm_dirty_log_manual_protect_and_init_set() argument
965 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); in kvm_dirty_log_manual_protect_and_init_set()
968 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) in kvm_get_bus() argument
970 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, in kvm_get_bus()
971 lockdep_is_held(&kvm->slots_lock) || in kvm_get_bus()
972 !refcount_read(&kvm->users_count)); in kvm_get_bus()
975 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) in kvm_get_vcpu() argument
977 int num_vcpus = atomic_read(&kvm->online_vcpus); in kvm_get_vcpu()
991 return xa_load(&kvm->vcpu_array, i); in kvm_get_vcpu()
994 #define kvm_for_each_vcpu(idx, vcpup, kvm) \ argument
995 if (atomic_read(&kvm->online_vcpus)) \
996 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
997 (atomic_read(&kvm->online_vcpus) - 1))
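Note the iterator index must be an unsigned long, since it is passed straight through to xa_for_each_range(). A sketch that kicks every vCPU:

static void example_kick_all(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vcpu_kick(vcpu);
}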
999 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) in kvm_get_vcpu_by_id() argument
1007 vcpu = kvm_get_vcpu(kvm, id); in kvm_get_vcpu_by_id()
1010 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_vcpu_by_id()
1016 void kvm_destroy_vcpus(struct kvm *kvm);
1022 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
1023 void kvm_arch_post_irq_routing_update(struct kvm *kvm);
1025 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) in kvm_arch_post_irq_ack_notifier_list_update() argument
1028 static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) in kvm_arch_post_irq_routing_update() argument
1049 void kvm_get_kvm(struct kvm *kvm);
1050 bool kvm_get_kvm_safe(struct kvm *kvm);
1051 void kvm_put_kvm(struct kvm *kvm);
1053 void kvm_put_kvm_no_destroy(struct kvm *kvm);
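Anything that stashes a kvm pointer beyond the current call (a hypothetical worker, a device, a backing file) must pin the VM first; kvm_get_kvm_safe() fails once destruction has begun. A sketch:

static int example_pin_vm(struct kvm *kvm)
{
        if (!kvm_get_kvm_safe(kvm))
                return -ENODEV;         /* VM is already being torn down */

        /* ... safe to use kvm until the matching put ... */
        kvm_put_kvm(kvm);
        return 0;
}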
1055 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) in __kvm_memslots() argument
1058 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, in __kvm_memslots()
1059 lockdep_is_held(&kvm->slots_lock) || in __kvm_memslots()
1060 !refcount_read(&kvm->users_count)); in __kvm_memslots()
1063 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) in kvm_memslots() argument
1065 return __kvm_memslots(kvm, 0); in kvm_memslots()
1072 return __kvm_memslots(vcpu->kvm, as_id); in kvm_vcpu_memslots()
1080 bool kvm_are_all_memslots_empty(struct kvm *kvm);
1195 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1217 int kvm_set_internal_memslot(struct kvm *kvm,
1219 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1220 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1221 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1225 void kvm_arch_commit_memory_region(struct kvm *kvm,
1230 void kvm_arch_flush_shadow_all(struct kvm *kvm);
1232 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1238 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
1239 static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
1241 return __gfn_to_page(kvm, gfn, true); in gfn_to_page()
1244 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1245 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1261 static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page, in kvm_release_faultin_page() argument
1264 lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused); in kvm_release_faultin_page()
1299 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1301 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1302 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1304 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1307 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1309 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1311 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1313 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1316 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
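A gfn_to_hva_cache amortizes the memslot walk across repeated accesses to one guest structure. A sketch with a hypothetical gpa; in real code the cache would live as long as the guest structure it maps, not on the stack:

static int example_cached_read(struct kvm *kvm, gpa_t gpa, u64 *val)
{
        struct gfn_to_hva_cache ghc;

        if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(*val)))
                return -EFAULT;
        /* reuses the cached hva until the memslot generation changes */
        return kvm_read_guest_cached(kvm, &ghc, val, sizeof(*val));
}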
1319 #define __kvm_get_guest(kvm, gfn, offset, v) \ argument
1321 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1330 #define kvm_get_guest(kvm, gpa, v) \ argument
1333 struct kvm *__kvm = kvm; \
1339 #define __kvm_put_guest(kvm, gfn, offset, v) \ argument
1341 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1348 mark_page_dirty(kvm, gfn); \
1352 #define kvm_put_guest(kvm, gpa, v) \ argument
1355 struct kvm *__kvm = kvm; \
1361 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1362 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1365 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1366 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
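kvm_put_guest() stores a single value with put_user() semantics and marks the page dirty itself (see the mark_page_dirty() call in __kvm_put_guest above); writes made through a raw hva need an explicit mark_page_dirty() so dirty logging observes them. A sketch with a hypothetical value:

static int example_publish(struct kvm *kvm, gpa_t gpa, u64 val)
{
        /* -EFAULT if the gpa has no memslot backing */
        return kvm_put_guest(kvm, gpa, val);
}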
1408 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
1512 void kvm_flush_remote_tlbs(struct kvm *kvm);
1513 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1514 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
1525 void kvm_mmu_invalidate_begin(struct kvm *kvm);
1526 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
1527 void kvm_mmu_invalidate_end(struct kvm *kvm);
1528 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
1536 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1538 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1542 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1545 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1546 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1550 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1552 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1580 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1586 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1619 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1620 void kvm_arch_create_vm_debugfs(struct kvm *kvm);
1627 static inline struct kvm *kvm_arch_alloc_vm(void) in kvm_arch_alloc_vm()
1629 return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT); in kvm_arch_alloc_vm()
1633 static inline void __kvm_arch_free_vm(struct kvm *kvm) in __kvm_arch_free_vm() argument
1635 kvfree(kvm); in __kvm_arch_free_vm()
1639 static inline void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
1641 __kvm_arch_free_vm(kvm); in kvm_arch_free_vm()
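The kzalloc()/kvfree() pair above is only the default: an architecture that embeds struct kvm in a larger per-VM structure defines __KVM_HAVE_ARCH_VM_ALLOC and supplies its own allocator. A sketch with a hypothetical arch structure:

#define __KVM_HAVE_ARCH_VM_ALLOC

struct example_arch_kvm {
        struct kvm kvm;                 /* generic part, must come first */
        /* ... arch-private per-VM state ... */
};

static inline struct kvm *kvm_arch_alloc_vm(void)
{
        /* kvfree() in __kvm_arch_free_vm() handles this allocation */
        return kzalloc(sizeof(struct example_arch_kvm), GFP_KERNEL_ACCOUNT);
}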
1646 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) in kvm_arch_flush_remote_tlbs() argument
1651 int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
1655 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, in kvm_arch_flush_remote_tlbs_range() argument
1661 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1665 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1666 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1667 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1669 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) in kvm_arch_register_noncoherent_dma() argument
1673 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) in kvm_arch_unregister_noncoherent_dma() argument
1677 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) in kvm_arch_has_noncoherent_dma() argument
1683 void kvm_arch_start_assignment(struct kvm *kvm);
1684 void kvm_arch_end_assignment(struct kvm *kvm);
1685 bool kvm_arch_has_assigned_device(struct kvm *kvm);
1687 static inline void kvm_arch_start_assignment(struct kvm *kvm) in kvm_arch_start_assignment() argument
1691 static inline void kvm_arch_end_assignment(struct kvm *kvm) in kvm_arch_end_assignment() argument
1695 static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) in kvm_arch_has_assigned_device() argument
1730 bool kvm_arch_intc_initialized(struct kvm *kvm);
1732 static inline bool kvm_arch_intc_initialized(struct kvm *kvm) in kvm_arch_intc_initialized() argument
1748 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1749 void kvm_arch_destroy_vm(struct kvm *kvm);
1759 int kvm_irq_map_gsi(struct kvm *kvm,
1761 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1763 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1765 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1768 struct kvm *kvm, int irq_source_id,
1770 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1771 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1772 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1773 void kvm_register_irq_ack_notifier(struct kvm *kvm,
1775 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1777 int kvm_request_irq_source_id(struct kvm *kvm);
1778 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
1779 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1869 static inline int memslot_id(struct kvm *kvm, gfn_t gfn) in memslot_id() argument
1871 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
1897 static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa) in kvm_is_gpa_in_memslot() argument
1899 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); in kvm_is_gpa_in_memslot()
1911 mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa)); in kvm_gpc_mark_dirty_in_slot()
1920 struct kvm *kvm; member
2097 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) in mmu_invalidate_retry() argument
2099 if (unlikely(kvm->mmu_invalidate_in_progress)) in mmu_invalidate_retry()
2114 if (kvm->mmu_invalidate_seq != mmu_seq) in mmu_invalidate_retry()
2119 static inline int mmu_invalidate_retry_gfn(struct kvm *kvm, in mmu_invalidate_retry_gfn() argument
2123 lockdep_assert_held(&kvm->mmu_lock); in mmu_invalidate_retry_gfn()
2130 if (unlikely(kvm->mmu_invalidate_in_progress)) { in mmu_invalidate_retry_gfn()
2135 if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA || in mmu_invalidate_retry_gfn()
2136 kvm->mmu_invalidate_range_end == INVALID_GPA)) in mmu_invalidate_retry_gfn()
2139 if (gfn >= kvm->mmu_invalidate_range_start && in mmu_invalidate_retry_gfn()
2140 gfn < kvm->mmu_invalidate_range_end) in mmu_invalidate_retry_gfn()
2144 if (kvm->mmu_invalidate_seq != mmu_seq) in mmu_invalidate_retry_gfn()
2155 static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm, in mmu_invalidate_retry_gfn_unsafe() argument
2167 if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) && in mmu_invalidate_retry_gfn_unsafe()
2168 gfn >= kvm->mmu_invalidate_range_start && in mmu_invalidate_retry_gfn_unsafe()
2169 gfn < kvm->mmu_invalidate_range_end) in mmu_invalidate_retry_gfn_unsafe()
2172 return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq; in mmu_invalidate_retry_gfn_unsafe()
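The retry helpers above implement the standard page-fault ordering: snapshot mmu_invalidate_seq, resolve the pfn outside mmu_lock (where the lookup may sleep), then recheck under the lock and restart if an invalidation raced in. A sketch with the pfn resolution elided:

static int example_fault(struct kvm *kvm, gfn_t gfn)
{
        unsigned long mmu_seq;

retry:
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();      /* pairs with the write side in invalidate_end */

        /* ... resolve the pfn for gfn; may sleep ... */

        write_lock(&kvm->mmu_lock);     /* a spinlock on some arches */
        if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                write_unlock(&kvm->mmu_lock);
                goto retry;
        }
        /* ... install the mapping ... */
        write_unlock(&kvm->mmu_lock);
        return 0;
}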
2180 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
2181 int kvm_set_irq_routing(struct kvm *kvm,
2185 int kvm_init_irq_routing(struct kvm *kvm);
2186 int kvm_set_routing_entry(struct kvm *kvm,
2189 void kvm_free_irq_routing(struct kvm *kvm);
2193 static inline void kvm_free_irq_routing(struct kvm *kvm) {} in kvm_free_irq_routing() argument
2195 static inline int kvm_init_irq_routing(struct kvm *kvm) in kvm_init_irq_routing() argument
2202 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
2204 void kvm_eventfd_init(struct kvm *kvm);
2205 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
2208 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
2209 void kvm_irqfd_release(struct kvm *kvm);
2210 bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2213 void kvm_irq_routing_update(struct kvm *);
2215 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
2220 static inline void kvm_irqfd_release(struct kvm *kvm) {} in kvm_irqfd_release() argument
2222 static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm, in kvm_notify_irqfd_resampler() argument
2230 void kvm_arch_irq_routing_update(struct kvm *kvm);
2297 struct kvm *kvm; member
2393 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
2435 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
2493 static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn) in kvm_get_memory_attributes() argument
2495 return xa_to_value(xa_load(&kvm->mem_attr_array, gfn)); in kvm_get_memory_attributes()
2498 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2500 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
2502 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
2505 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) in kvm_mem_is_private() argument
2508 kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE; in kvm_mem_is_private()
2511 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) in kvm_mem_is_private() argument
2518 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
2522 static inline int kvm_gmem_get_pfn(struct kvm *kvm, in kvm_gmem_get_pfn() argument
2527 KVM_BUG_ON(1, kvm); in kvm_gmem_get_pfn()
2533 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
2558 typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2561 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
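A sketch of a hypothetical populate callback, assuming the full typedef at line 2558 continues as (kvm, gfn, pfn, src, order, opaque); kvm_gmem_populate() invokes the callback once per allocated page so the architecture can initialize (e.g. encrypt or measure) the contents before the guest sees them.

static int example_populate_cb(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
                               void __user *src, int order, void *opaque)
{
        /* arch-specific: copy from src, then convert the page to private */
        return 0;
}

static long example_populate(struct kvm *kvm, gfn_t gfn, void __user *src,
                             long npages)
{
        return kvm_gmem_populate(kvm, gfn, src, npages,
                                 example_populate_cb, NULL);
}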