1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #ifndef __KVM_TYPES_H__
4 #define __KVM_TYPES_H__
10 struct kvm_irq_routing_table;
11 struct kvm_memory_slot;
14 struct kvm_userspace_memory_region;
21 #include <linux/types.h>
22 #include <linux/spinlock_types.h>
24 #include <asm/kvm_types.h>
29 * gva - guest virtual address
30 * gpa - guest physical address
31 * gfn - guest frame number
32 * hva - host virtual address
33 * hpa - host physical address
34 * hfn - host frame number
/* Guest virtual address. */
37 typedef unsigned long gva_t;
/*
 * All-bits-set sentinel marking an invalid guest physical address.
 * NOTE(review): gpa_t is not declared in the visible lines — presumably a
 * 64-bit typedef earlier in this header; confirm its width before relying
 * on the exact sentinel value.
 */
41 #define GPA_INVALID (~(gpa_t)0)
/* Host virtual address. */
43 typedef unsigned long hva_t;
/*
 * KVM's page-frame-number type, an alias of hfn_t (host frame number per
 * the glossary above).  hfn_t's own declaration is outside the visible
 * lines — TODO confirm its underlying type.
 */
47 typedef hfn_t kvm_pfn_t;
/*
 * Cache of a guest-frame -> host-virtual-address translation.
 * NOTE(review): this struct is truncated in the visible chunk — the
 * remaining members and the closing brace fall outside the shown lines,
 * so only the memslot member is documented here.
 */
49 struct gfn_to_hva_cache {
/* Memory slot the cached translation was resolved against. */
54 struct kvm_memory_slot *memslot;
/*
 * Cache of a guest-frame -> pfn translation.
 * NOTE(review): truncated in this view — additional members and the
 * closing brace are not visible in the shown lines.
 */
57 struct gfn_to_pfn_cache {
/* Memory slot the cached translation was resolved from. */
61 struct kvm_memory_slot *memslot;
/* Associated vCPU — presumably the owner of this cache; confirm at callers. */
62 struct kvm_vcpu *vcpu;
/* Linkage node for a list of active pfn caches — list head owner not
 * visible here; TODO confirm (likely per-VM). */
63 struct list_head list;
/*
 * Only architectures that define a per-cache capacity get the MMU memory
 * cache; the matching #endif is outside the visible lines.
 */
74 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * NOTE(review): the lines below are the body of a block comment whose
 * opening and closing delimiters fall outside the visible chunk.
 */
76 * Memory caches are used to preallocate memory ahead of various MMU flows,
77 * e.g. page fault handlers. Gracefully handling allocation failures deep in
78 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
79 * holding MMU locks. Note, these caches act more like prefetch buffers than
80 * classical caches, i.e. objects are not returned to the cache on being freed.
/*
 * Preallocation buffer for MMU flows; truncated in this view — other
 * members (and the closing brace) are outside the shown lines.
 */
82 struct kvm_mmu_memory_cache {
/* Slab cache the objects are allocated from — refill path not visible
 * here; TODO confirm. */
85 struct kmem_cache *kmem_cache;
/* Fixed-capacity array of preallocated objects; capacity is chosen
 * per-architecture via KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE. */
86 void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
/* Number of buckets in the halt-polling histograms declared below. */
90 #define HALT_POLL_HIST_COUNT 32
/*
 * Architecture-independent per-VM statistics.
 * NOTE(review): truncated in this view — any further members and the
 * closing brace fall outside the visible lines.
 */
92 struct kvm_vm_stat_generic {
/* Count of remote TLB flush requests — accounting site not visible here. */
94 u64 remote_tlb_flush_requests;
/*
 * Architecture-independent per-vCPU statistics, dominated by halt-polling
 * counters and histograms.
 * NOTE(review): truncated in this view — the closing brace and any further
 * members are outside the visible lines.  Semantics below are inferred
 * from the field names; confirm against the accounting sites.
 */
97 struct kvm_vcpu_stat_generic {
/* Presumably: polls that ended with the wake condition satisfied. */
98 u64 halt_successful_poll;
/* Presumably: total polls attempted, successful or not. */
99 u64 halt_attempted_poll;
/* Presumably: polls aborted as invalid/ineligible. */
100 u64 halt_poll_invalid;
/* Presumably: cumulative nanoseconds spent in successful polls. */
102 u64 halt_poll_success_ns;
/* Presumably: cumulative nanoseconds spent in failed polls. */
103 u64 halt_poll_fail_ns;
/* Histogram of successful-poll durations, HALT_POLL_HIST_COUNT buckets. */
105 u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
/* Histogram of failed-poll durations, HALT_POLL_HIST_COUNT buckets. */
106 u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
/* Histogram of blocking halt-wait durations, HALT_POLL_HIST_COUNT buckets. */
107 u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
/* Buffer size for a statistics descriptor name; whether the count includes
 * the NUL terminator is not visible here — TODO confirm at the users. */
111 #define KVM_STATS_NAME_SIZE 48
113 #endif /* __KVM_TYPES_H__ */