Merge tag 'platform-drivers-x86-v5.18-1' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-block.git] / include / linux / kvm_types.h
CommitLineData
cd93f165 1/* SPDX-License-Identifier: GPL-2.0-only */
d77a39d9
HB
2
3#ifndef __KVM_TYPES_H__
4#define __KVM_TYPES_H__
5
65647300
PB
/*
 * Forward declarations of core KVM types.  Headers that only pass
 * pointers to these structures can include this lightweight header
 * instead of pulling in the full definitions.
 */
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

/* Kind of memslot modification; defined elsewhere (see kvm_host.h). */
enum kvm_mr_change;
91724814 21#include <linux/types.h>
982ed0de 22#include <linux/spinlock_types.h>
d77a39d9 23
2aa9c199
SC
24#include <asm/kvm_types.h>
25
d77a39d9
HB
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;

/* All-ones gpa_t used as an "invalid guest physical address" sentinel. */
#define GPA_INVALID (~(gpa_t)0)

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;

/* KVM's name for a host page frame number. */
typedef hfn_t kvm_pfn_t;
35149e21 48
49c7754c
GN
49struct gfn_to_hva_cache {
50 u64 generation;
51 gpa_t gpa;
52 unsigned long hva;
8f964525 53 unsigned long len;
49c7754c
GN
54 struct kvm_memory_slot *memslot;
55};
56
982ed0de
DW
57struct gfn_to_pfn_cache {
58 u64 generation;
59 gpa_t gpa;
60 unsigned long uhva;
61 struct kvm_memory_slot *memslot;
62 struct kvm_vcpu *vcpu;
63 struct list_head list;
64 rwlock_t lock;
65 void *khva;
66 kvm_pfn_t pfn;
67 bool active;
68 bool valid;
69 bool dirty;
70 bool kernel_map;
71 bool guest_uses_pa;
72};
73
2aa9c199
SC
74#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
75/*
76 * Memory caches are used to preallocate memory ahead of various MMU flows,
77 * e.g. page fault handlers. Gracefully handling allocation failures deep in
78 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
79 * holding MMU locks. Note, these caches act more like prefetch buffers than
80 * classical caches, i.e. objects are not returned to the cache on being freed.
81 */
82struct kvm_mmu_memory_cache {
83 int nobjs;
84 gfp_t gfp_zero;
85 struct kmem_cache *kmem_cache;
86 void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
87};
88#endif
89
8ccba534
JZ
90#define HALT_POLL_HIST_COUNT 32
91
0193cc90
JZ
92struct kvm_vm_stat_generic {
93 u64 remote_tlb_flush;
3cc4e148 94 u64 remote_tlb_flush_requests;
0193cc90
JZ
95};
96
97struct kvm_vcpu_stat_generic {
98 u64 halt_successful_poll;
99 u64 halt_attempted_poll;
100 u64 halt_poll_invalid;
101 u64 halt_wakeup;
102 u64 halt_poll_success_ns;
103 u64 halt_poll_fail_ns;
87bcc5fa 104 u64 halt_wait_ns;
8ccba534
JZ
105 u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
106 u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
107 u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
c3858335 108 u64 blocking;
0193cc90 109};

/* Maximum length, including NUL, of a statistic's name in the stats ABI. */
#define KVM_STATS_NAME_SIZE	48

d77a39d9 113#endif /* __KVM_TYPES_H__ */