xref: /linux-6.15/include/linux/kvm_types.h (revision c23e2b71)
1cd93f165SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
2edf88417SAvi Kivity 
3edf88417SAvi Kivity #ifndef __KVM_TYPES_H__
4edf88417SAvi Kivity #define __KVM_TYPES_H__
5edf88417SAvi Kivity 
665647300SPaolo Bonzini struct kvm;
765647300SPaolo Bonzini struct kvm_async_pf;
865647300SPaolo Bonzini struct kvm_device_ops;
9f128cf8cSSean Christopherson struct kvm_gfn_range;
1065647300SPaolo Bonzini struct kvm_interrupt;
1165647300SPaolo Bonzini struct kvm_irq_routing_table;
1265647300SPaolo Bonzini struct kvm_memory_slot;
1365647300SPaolo Bonzini struct kvm_one_reg;
1465647300SPaolo Bonzini struct kvm_run;
1565647300SPaolo Bonzini struct kvm_userspace_memory_region;
1665647300SPaolo Bonzini struct kvm_vcpu;
1765647300SPaolo Bonzini struct kvm_vcpu_init;
1815f46015SPaolo Bonzini struct kvm_memslots;
1965647300SPaolo Bonzini 
2065647300SPaolo Bonzini enum kvm_mr_change;
2165647300SPaolo Bonzini 
22d0d96121SSean Christopherson #include <linux/bits.h>
2393984f19SSean Christopherson #include <linux/mutex.h>
2491724814SBoris Ostrovsky #include <linux/types.h>
25982ed0deSDavid Woodhouse #include <linux/spinlock_types.h>
26edf88417SAvi Kivity 
272aa9c199SSean Christopherson #include <asm/kvm_types.h>
282aa9c199SSean Christopherson 
29edf88417SAvi Kivity /*
30edf88417SAvi Kivity  * Address types:
31edf88417SAvi Kivity  *
32edf88417SAvi Kivity  *  gva - guest virtual address
33edf88417SAvi Kivity  *  gpa - guest physical address
34edf88417SAvi Kivity  *  gfn - guest frame number
35edf88417SAvi Kivity  *  hva - host virtual address
36edf88417SAvi Kivity  *  hpa - host physical address
37edf88417SAvi Kivity  *  hfn - host frame number
38edf88417SAvi Kivity  */
39edf88417SAvi Kivity 
typedef unsigned long  gva_t;	/* guest virtual address */
typedef u64            gpa_t;	/* guest physical address */
typedef u64            gfn_t;	/* guest frame number (gpa >> PAGE_SHIFT) */

/* All-ones sentinel marking a gpa_t as "no/invalid address". */
#define INVALID_GPA	(~(gpa_t)0)

typedef unsigned long  hva_t;	/* host virtual address */
typedef u64            hpa_t;	/* host physical address */
typedef u64            hfn_t;	/* host frame number */

/* KVM's canonical page-frame-number type: a host frame number. */
typedef hfn_t kvm_pfn_t;
5135149e21SAnthony Liguori 
/*
 * Cached translation of a guest-physical range to a host virtual address.
 * @generation snapshots the memslot generation the entry was filled under;
 * NOTE(review): the staleness check against the current generation is done
 * by the cache's users, not visible in this header — confirm in kvm_main.c.
 */
struct gfn_to_hva_cache {
	u64 generation;			/* memslot generation at fill time */
	gpa_t gpa;			/* guest physical base of the cached range */
	unsigned long hva;		/* cached host virtual address for @gpa */
	unsigned long len;		/* length of the cached range, in bytes */
	struct kvm_memory_slot *memslot;	/* memslot backing @gpa */
};
5949c7754cSGleb Natapov 
/*
 * Cached translation of a guest physical address to a host pfn, together
 * with a kernel mapping (@khva) of that page.  Caches are linked on a
 * per-VM list (@list) so they can be found and invalidated when memslots
 * or mappings change; NOTE(review): invalidation/refresh protocol lives in
 * virt/kvm/pfncache.c — confirm details there.
 */
struct gfn_to_pfn_cache {
	u64 generation;			/* memslot generation at refresh time */
	gpa_t gpa;			/* guest physical address being cached */
	unsigned long uhva;		/* userspace host virtual address of the page */
	struct kvm_memory_slot *memslot;	/* memslot backing @gpa */
	struct kvm *kvm;		/* VM this cache belongs to */
	struct list_head list;		/* link in the VM's list of pfn caches */
	rwlock_t lock;			/* protects readers against invalidation */
	struct mutex refresh_lock;	/* serializes refresh operations */
	void *khva;			/* kernel virtual mapping of the cached page */
	kvm_pfn_t pfn;			/* cached host pfn */
	bool active;			/* cache has been activated by its owner */
	bool valid;			/* cached translation is currently usable */
};
74982ed0deSDavid Woodhouse 
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 *
 * The @capacity field and @objects array are lazily initialized when the cache
 * is topped up (__kvm_mmu_topup_memory_cache()).
 */
struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;			/* GFP flag(s) forcing zeroed allocations */
	gfp_t gfp_custom;		/* arch-supplied GFP flags for top-up allocations */
	u64 init_value;			/* pattern objects are initialized with */
	struct kmem_cache *kmem_cache;	/* slab cache to allocate from; NULL = page allocator */
	int capacity;			/* size of @objects; lazily set on first top-up */
	int nobjs;			/* number of preallocated objects currently held */
	void **objects;			/* lazily allocated array of preallocated objects */
};
#endif
962aa9c199SSean Christopherson 
/* Number of buckets in the halt-polling latency histograms below. */
#define HALT_POLL_HIST_COUNT			32

/* Architecture-independent per-VM statistics, embedded in each arch's VM stats. */
struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;		/* remote TLB flushes performed */
	u64 remote_tlb_flush_requests;	/* remote TLB flushes requested */
};
1030193cc90SJing Zhang 
/*
 * Architecture-independent per-vCPU statistics, embedded in each arch's
 * vCPU stats.  Mostly counters and histograms for halt polling, the
 * optimization that spins briefly before blocking a halted vCPU.
 */
struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;	/* polls that caught a wakeup before blocking */
	u64 halt_attempted_poll;	/* total halt-poll attempts */
	u64 halt_poll_invalid;		/* polls aborted as invalid */
	u64 halt_wakeup;		/* wakeups delivered while blocked */
	u64 halt_poll_success_ns;	/* total ns spent in successful polling */
	u64 halt_poll_fail_ns;		/* total ns spent in unsuccessful polling */
	u64 halt_wait_ns;		/* total ns spent blocked waiting */
	u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];	/* successful-poll latency histogram */
	u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];		/* failed-poll latency histogram */
	u64 halt_wait_hist[HALT_POLL_HIST_COUNT];		/* blocked-wait latency histogram */
	u64 blocking;			/* vCPU is currently blocking (not a counter) */
};
1172aa9c199SSean Christopherson 
118cb082bfaSJing Zhang #define KVM_STATS_NAME_SIZE	48
119cb082bfaSJing Zhang 
120edf88417SAvi Kivity #endif /* __KVM_TYPES_H__ */
121