Merge tag 'riscv-for-linus-5.17-mw0' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-block.git] / include / linux / kvm_types.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2
3 #ifndef __KVM_TYPES_H__
4 #define __KVM_TYPES_H__
5
6 struct kvm;
7 struct kvm_async_pf;
8 struct kvm_device_ops;
9 struct kvm_interrupt;
10 struct kvm_irq_routing_table;
11 struct kvm_memory_slot;
12 struct kvm_one_reg;
13 struct kvm_run;
14 struct kvm_userspace_memory_region;
15 struct kvm_vcpu;
16 struct kvm_vcpu_init;
17 struct kvm_memslots;
18
19 enum kvm_mr_change;
20
21 #include <linux/types.h>
22 #include <linux/spinlock_types.h>
23
24 #include <asm/kvm_types.h>
25
26 /*
27  * Address types:
28  *
29  *  gva - guest virtual address
30  *  gpa - guest physical address
31  *  gfn - guest frame number
32  *  hva - host virtual address
33  *  hpa - host physical address
34  *  hfn - host frame number
35  */
36
typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef u64            gfn_t;

/* All-bits-set gpa_t: sentinel for "no/invalid guest physical address". */
#define GPA_INVALID     (~(gpa_t)0)

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef u64            hfn_t;

/* Host frame number alias; call sites read as "pfn backing a guest page". */
typedef hfn_t kvm_pfn_t;
48
/*
 * Cached gpa -> hva translation so repeated guest-memory accesses can skip
 * the memslot lookup.  NOTE(review): users appear to revalidate by comparing
 * @generation against the current memslots generation — confirm in the
 * kvm_gfn_to_hva_cache_*() helpers.
 */
struct gfn_to_hva_cache {
        u64 generation;                 /* memslot generation when the cache was filled */
        gpa_t gpa;                      /* guest physical address being cached */
        unsigned long hva;              /* host virtual address mapping @gpa */
        unsigned long len;              /* length in bytes covered by this mapping */
        struct kvm_memory_slot *memslot; /* memslot that contained @gpa at fill time */
};
56
/*
 * Cached gpa -> host pfn translation, including a kernel mapping (@khva) of
 * the backing page.  NOTE(review): field semantics below are inferred from
 * names and typical pfncache usage — confirm against virt/kvm/pfncache.c.
 */
struct gfn_to_pfn_cache {
        u64 generation;                 /* memslot generation when the cache was filled */
        gpa_t gpa;                      /* guest physical address being cached */
        unsigned long uhva;             /* presumably the userspace HVA backing @gpa */
        struct kvm_memory_slot *memslot; /* memslot that contained @gpa at fill time */
        struct kvm_vcpu *vcpu;          /* vCPU this cache is associated with, if any */
        struct list_head list;          /* linkage into a per-VM cache list — confirm owner */
        rwlock_t lock;                  /* protects the translation fields — confirm scope */
        void *khva;                     /* kernel virtual address of the mapped page */
        kvm_pfn_t pfn;                  /* host pfn currently backing @gpa */
        bool active;                    /* cache has been initialized/activated */
        bool valid;                     /* cached translation is currently usable */
        bool dirty;                     /* page was written through this mapping */
        bool kernel_map;                /* a kernel mapping (@khva) is requested/held */
        bool guest_uses_pa;             /* guest consumes the physical address directly */
};
73
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 */
struct kvm_mmu_memory_cache {
        int nobjs;                      /* number of valid entries in objects[] */
        gfp_t gfp_zero;                 /* extra GFP flag(s) for zeroing — presumably __GFP_ZERO or 0 */
        struct kmem_cache *kmem_cache;  /* backing slab; NULL likely means page allocator — confirm */
        void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];       /* preallocated objects */
};
#endif
89
/* Number of buckets in the halt-polling latency histograms below. */
#define HALT_POLL_HIST_COUNT                    32

/* Architecture-independent VM-wide statistics. */
struct kvm_vm_stat_generic {
        u64 remote_tlb_flush;           /* remote TLB flushes performed */
        u64 remote_tlb_flush_requests;  /* remote TLB flushes requested */
};
96
/*
 * Architecture-independent per-vCPU statistics, mostly covering halt
 * polling (see Documentation/virt/kvm/halt-polling.rst for the scheme).
 */
struct kvm_vcpu_stat_generic {
        u64 halt_successful_poll;       /* polls that observed a wakeup event */
        u64 halt_attempted_poll;        /* total halt-poll attempts */
        u64 halt_poll_invalid;          /* polls aborted as invalid — confirm exact trigger */
        u64 halt_wakeup;                /* wakeups delivered while halted */
        u64 halt_poll_success_ns;       /* cumulative ns spent in successful polls */
        u64 halt_poll_fail_ns;          /* cumulative ns spent in failed polls */
        u64 halt_wait_ns;               /* cumulative ns spent blocked after polling */
        u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; /* latency histogram, successful polls */
        u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];    /* latency histogram, failed polls */
        u64 halt_wait_hist[HALT_POLL_HIST_COUNT];         /* latency histogram, blocking waits */
        u64 blocking;                   /* presumably nonzero while the vCPU is blocked — confirm */
};
110
/* Fixed buffer size for a stat's name in the KVM binary stats interface —
 * NOTE(review): confirm against the stats descriptors in uapi kvm.h. */
#define KVM_STATS_NAME_SIZE     48
112
113 #endif /* __KVM_TYPES_H__ */