Merge branch 'address-masking'
[linux-2.6-block.git] / include / linux / kvm_types.h
CommitLineData
cd93f165 1/* SPDX-License-Identifier: GPL-2.0-only */
d77a39d9
HB
2
3#ifndef __KVM_TYPES_H__
4#define __KVM_TYPES_H__
5
65647300
PB
6struct kvm;
7struct kvm_async_pf;
8struct kvm_device_ops;
f128cf8c 9struct kvm_gfn_range;
65647300
PB
10struct kvm_interrupt;
11struct kvm_irq_routing_table;
12struct kvm_memory_slot;
13struct kvm_one_reg;
14struct kvm_run;
15struct kvm_userspace_memory_region;
16struct kvm_vcpu;
17struct kvm_vcpu_init;
15f46015 18struct kvm_memslots;
65647300
PB
19
20enum kvm_mr_change;
21
d0d96121 22#include <linux/bits.h>
93984f19 23#include <linux/mutex.h>
91724814 24#include <linux/types.h>
982ed0de 25#include <linux/spinlock_types.h>
d77a39d9 26
2aa9c199
SC
27#include <asm/kvm_types.h>
28
d77a39d9
HB
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;

/* All-ones gpa_t: sentinel marking a guest physical address as invalid. */
#define INVALID_GPA (~(gpa_t)0)

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;

/* A "pfn" as seen by KVM is a host frame number. */
typedef hfn_t kvm_pfn_t;
35149e21 51
49c7754c
GN
/*
 * Cached translation of a guest physical range to a host virtual address,
 * avoiding a memslot lookup on every access.
 */
struct gfn_to_hva_cache {
	/*
	 * Generation snapshot taken when the cache was filled.
	 * NOTE(review): presumably compared against the current memslots
	 * generation to detect a stale translation — confirm against the
	 * users of this cache.
	 */
	u64 generation;
	gpa_t gpa;			/* guest physical base of the cached range */
	unsigned long hva;		/* corresponding host virtual address */
	unsigned long len;		/* length of the cached range, in bytes */
	struct kvm_memory_slot *memslot; /* memslot backing the translation */
};
59
982ed0de
DW
/*
 * Cached translation of a guest physical address to a host pfn, with a
 * kernel mapping (khva) so the page can be accessed directly.
 */
struct gfn_to_pfn_cache {
	/*
	 * Generation snapshot taken when the cache was (re)filled.
	 * NOTE(review): presumably checked against the current memslots
	 * generation to detect staleness — confirm against the refresh path.
	 */
	u64 generation;
	gpa_t gpa;			/* guest physical address being cached */
	unsigned long uhva;		/* userspace virtual address backing @gpa */
	struct kvm_memory_slot *memslot; /* memslot the translation came from */
	struct kvm *kvm;		/* owning VM */
	struct list_head list;		/* NOTE(review): likely links the cache
					 * into a per-VM list — confirm */
	rwlock_t lock;			/* protects readers of the cached state */
	struct mutex refresh_lock;	/* NOTE(review): name suggests it
					 * serializes refresh operations against
					 * each other — confirm */
	void *khva;			/* kernel mapping of the cached page */
	kvm_pfn_t pfn;			/* host pfn backing @gpa */
	bool active;			/* cache has been activated */
	bool valid;			/* cached translation is currently usable */
};
74
2aa9c199
SC
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 *
 * The @capacity field and @objects array are lazily initialized when the cache
 * is topped up (__kvm_mmu_topup_memory_cache()).
 */
struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;			/* NOTE(review): name suggests this holds
					 * __GFP_ZERO (or 0) to control zeroing
					 * of allocations — confirm at top-up */
	gfp_t gfp_custom;		/* extra GFP flags supplied by the arch */
	u64 init_value;			/* NOTE(review): presumably the fill
					 * pattern for new objects — confirm */
	struct kmem_cache *kmem_cache;	/* dedicated slab cache; allocation
					 * source when non-NULL */
	int capacity;			/* size of @objects; lazily set, see
					 * comment above */
	int nobjs;			/* number of preallocated objects held */
	void **objects;			/* lazily allocated array of objects */
};
#endif
96
8ccba534
JZ
97#define HALT_POLL_HIST_COUNT 32
98
0193cc90
JZ
/* Architecture-independent per-VM statistics. */
struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;		/* remote TLB flushes performed */
	u64 remote_tlb_flush_requests;	/* remote TLB flushes requested */
};
103
/*
 * Architecture-independent per-vCPU statistics, dominated by halt-polling
 * counters and timing histograms (HALT_POLL_HIST_COUNT buckets each).
 */
struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;	/* polls that observed a wake event */
	u64 halt_attempted_poll;	/* total halt-poll attempts */
	u64 halt_poll_invalid;		/* polls aborted as invalid */
	u64 halt_wakeup;		/* wakeups after actually blocking */
	u64 halt_poll_success_ns;	/* time spent in successful polling */
	u64 halt_poll_fail_ns;		/* time spent in failed polling */
	u64 halt_wait_ns;		/* time spent blocked waiting */
	u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; /* successful-poll
							   * latency histogram */
	u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];	  /* failed-poll
							   * latency histogram */
	u64 halt_wait_hist[HALT_POLL_HIST_COUNT];	  /* blocked-wait
							   * latency histogram */
	u64 blocking;			/* NOTE(review): presumably nonzero while
					 * the vCPU is blocked — confirm */
};
2aa9c199 117
cb082bfa
JZ
118#define KVM_STATS_NAME_SIZE 48
119
d77a39d9 120#endif /* __KVM_TYPES_H__ */