/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>
#include <linux/hashtable.h>
#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif

/*
 * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are used internally
 * by kvm; the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

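/*
 * Hedged illustration (not part of the mainline header): a consumer of the
 * generation number could validate a cached value roughly as below, where
 * "cached_gen" is hypothetical state recorded from kvm_memslots::generation
 * when the cache entry was filled.  An entry filled while an update was in
 * progress recorded the in-progress flag and therefore can never match a
 * post-update generation.
 */
static inline bool example_memslot_cache_is_current(u64 cached_gen, u64 current_gen)
{
        return cached_gen == current_gen &&
               !(cached_gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
}
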
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_MAX_NR_ADDRESS_SPACES
#define KVM_MAX_NR_ADDRESS_SPACES	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can mask
 * bits 62 ~ 52 to indicate an error pfn, and bit 63 to indicate a
 * noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
#define KVM_PFN_ERR_SIGPENDING	(KVM_PFN_ERR_MASK + 3)

/*
 * Error pfns indicate that the gfn is in slot but failed to
 * translate it to a pfn on the host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
 * by a pending signal.  Note, the signal may or may not be fatal.
 */
static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_ERR_SIGPENDING;
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not in a slot, or the translation to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in a slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures with a KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

static inline bool kvm_is_error_gpa(gpa_t gpa)
{
        return gpa == INVALID_GPA;
}

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7, 0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
#define KVM_REQUEST_NO_ACTION	BIT(10)
/*
 * Architecture-independent vcpu->requests bit members.
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH		(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD			(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK			2
#define KVM_REQ_DIRTY_RING_SOFT_FULL	3
#define KVM_REQUEST_ARCH_BASE		8

/*
 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
 * OUTSIDE_GUEST_MODE.  KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
 * on.  A kick only guarantees that the vCPU is on its way out, e.g. a previous
 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
 * guarantee the vCPU received an IPI and has actually exited guest mode.
 */
#define KVM_REQ_OUTSIDE_GUEST_MODE	(KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)

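/*
 * Hedged illustration (not part of this header): an architecture's
 * asm/kvm_host.h would layer its own requests on top of
 * KVM_REQUEST_ARCH_BASE along these lines; the request names below are
 * hypothetical:
 *
 *	#define KVM_REQ_EXAMPLE_RELOAD	KVM_ARCH_REQ(0)
 *	#define KVM_REQ_EXAMPLE_SYNC	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
 *
 * Requests are then raised with kvm_make_request(req, vcpu) for a single
 * vCPU, or with kvm_make_all_cpus_request() (declared below) for all vCPUs.
 */
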
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                                 unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);

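/*
 * Hedged usage sketch (assumes <kvm/iodevice.h> for struct kvm_io_device,
 * struct kvm_io_device_ops and kvm_iodevice_init(); the "example_*" names
 * are hypothetical).  A device is registered on a bus under slots_lock and
 * then receives read/write callbacks for its address range:
 *
 *	static int example_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 *				 gpa_t addr, int len, const void *val)
 *	{
 *		// handle the guest's write to [addr, addr + len)
 *		return 0;
 *	}
 *
 *	static const struct kvm_io_device_ops example_ops = {
 *		.write = example_write,
 *	};
 *
 *	kvm_iodevice_init(&dev, &example_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, &dev);
 *	mutex_unlock(&kvm->slots_lock);
 */
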
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        gpa_t cr2_or_gpa;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool wakeup_all;
        bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
union kvm_mmu_notifier_arg {
        unsigned long attributes;
};

struct kvm_gfn_range {
        struct kvm_memory_slot *slot;
        gfn_t start;
        gfn_t end;
        union kvm_mmu_notifier_arg arg;
        bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

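/*
 * Hedged sketch of the contract for the hooks above: each architecture
 * walks its stage-2 page tables over [range->start, range->end) within
 * range->slot and returns whether a TLB flush is needed, e.g. roughly:
 *
 *	bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 *	{
 *		bool flush = false;
 *		gfn_t gfn;
 *
 *		for (gfn = range->start; gfn < range->end; gfn++)
 *			flush |= example_arch_zap_gfn(kvm, range->slot, gfn);
 *
 *		return flush;
 *	}
 *
 * where example_arch_zap_gfn() is a hypothetical per-arch helper; real
 * implementations operate on whole ranges rather than single gfns.
 */
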
enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
        /*
         * Only valid if the 'pfn' is managed by the host kernel (i.e. there
         * is a 'struct page' for it; when using the mem= kernel parameter,
         * some memory can be used as guest memory without being managed by
         * the host kernel).
         * If 'pfn' is not managed by the host kernel, this field is
         * initialized to KVM_UNMAPPED_PAGE.
         */
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
        kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not.  Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
        return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
        return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id; /* id given by userspace at creation */
        int vcpu_idx; /* index into kvm->vcpu_array */
        int ____srcu_idx; /* Don't use this directly.  You've been warned. */
#ifdef CONFIG_PROVE_RCU
        int srcu_depth;
#endif
        int mode;
        u64 requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

#ifndef __KVM_HAVE_ARCH_WQP
        struct rcuwait wait;
#endif
        struct pid __rcu *pid;
        int sigset_active;
        sigset_t sigset;
        unsigned int halt_poll_ns;
        bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * Cpu relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or cpu relax intercepted.
         * dy_eligible: indicates whether vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        bool ready;
        struct kvm_vcpu_arch arch;
        struct kvm_vcpu_stat stat;
        char stats_id[KVM_STATS_NAME_SIZE];
        struct kvm_dirty_ring dirty_ring;

        /*
         * The most recently used memslot by this vCPU and the slots generation
         * for which it is valid.
         * No wraparound protection is needed since generations won't overflow in
         * thousands of years, even assuming 1M memslot operations per second.
         */
        struct kvm_memory_slot *last_used_slot;
        u64 last_used_slot_gen;
};

/*
 * Start accounting time towards a guest.
 * Must be called before entering guest context.
 */
static __always_inline void guest_timing_enter_irqoff(void)
{
        /*
         * This is running in ioctl context so it's safe to assume that it's
         * the stime pending cputime to flush.
         */
        instrumentation_begin();
        vtime_account_guest_enter();
        instrumentation_end();
}

/*
 * Enter guest context and enter an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_enter_irqoff(void)
{
        /*
         * KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode.  In fact switching to a guest mode
         * is very similar to exiting to userspace from rcu point of view.  In
         * addition CPU may stay in a guest mode for quite a long time (up to
         * one time slice).  Let's treat guest mode as quiescent state, just
         * like we do with user-mode execution.
         */
        if (!context_tracking_guest_enter()) {
                instrumentation_begin();
                rcu_virt_note_context_switch();
                instrumentation_end();
        }
}

/*
 * Deprecated.  Architectures should move to guest_timing_enter_irqoff() and
 * guest_state_enter_irqoff().
 */
static __always_inline void guest_enter_irqoff(void)
{
        guest_timing_enter_irqoff();
        guest_context_enter_irqoff();
}

/**
 * guest_state_enter_irqoff - Fixup state when entering a guest
 *
 * Entry to a guest will enable interrupts, but the kernel state is interrupts
 * disabled when this is invoked.  Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code before entering a guest.
 * Must be called with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_enter_irqoff() before this.
 *
 * Note: this is analogous to exit_to_user_mode().
 */
static __always_inline void guest_state_enter_irqoff(void)
{
        instrumentation_begin();
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare();
        instrumentation_end();

        guest_context_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

/*
 * Exit guest context and exit an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_exit_irqoff(void)
{
        context_tracking_guest_exit();
}

/*
 * Stop accounting time towards a guest.
 * Must be called after exiting guest context.
 */
static __always_inline void guest_timing_exit_irqoff(void)
{
        instrumentation_begin();
        /* Flush the guest cputime we spent on the guest */
        vtime_account_guest_exit();
        instrumentation_end();
}

/*
 * Deprecated.  Architectures should move to guest_state_exit_irqoff() and
 * guest_timing_exit_irqoff().
 */
static __always_inline void guest_exit_irqoff(void)
{
        guest_context_exit_irqoff();
        guest_timing_exit_irqoff();
}

static inline void guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit_irqoff();
        local_irq_restore(flags);
}

/**
 * guest_state_exit_irqoff - Establish state when returning from guest mode
 *
 * Entry from a guest disables interrupts, but guest mode is traced as
 * interrupts enabled.  Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific code after exiting a guest.
 * Must be invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_exit_irqoff() after this.
 *
 * Note: this is analogous to enter_from_user_mode().
 */
static __always_inline void guest_state_exit_irqoff(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);
        guest_context_exit_irqoff();

        instrumentation_begin();
        trace_hardirqs_off_finish();
        instrumentation_end();
}

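/*
 * Hedged illustration of the intended ordering in an architecture's run
 * loop, per the comments above (the guest-entry step in the middle is
 * arch-specific and hypothetical here):
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	<enter the guest, e.g. example_arch_run_vcpu(vcpu)>
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */
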
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        /*
         * The memory barrier ensures a previous write to vcpu->requests cannot
         * be reordered with the read of vcpu->mode.  It pairs with the general
         * memory barrier following the write of vcpu->mode in VCPU RUN.
         */
        smp_mb__before_atomic();
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

/*
 * Since at idle each memslot belongs to two memslot sets it has to contain
 * two embedded nodes for each data structure that it forms a part of.
 *
 * Two memslot sets (one active and one inactive) are necessary so the VM
 * continues to run on one memslot set while the other is being modified.
 *
 * These two memslot sets normally point to the same set of memslots.
 * They can, however, be desynchronized when performing a memslot management
 * operation by replacing the memslot to be modified by its copy.
 * After the operation is complete, both memslot sets once again point to
 * the same, common set of memslot data.
 *
 * The memslots themselves are independent of each other so they can be
 * individually added or deleted.
 */
struct kvm_memory_slot {
        struct hlist_node id_node[2];
        struct interval_tree_node hva_node[2];
        struct rb_node gfn_node[2];
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
        u16 as_id;

#ifdef CONFIG_KVM_PRIVATE_MEM
        struct {
                struct file __rcu *file;
                pgoff_t pgoff;
        } gmem;
#endif
};

static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
{
        return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
}

static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
        return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long len = kvm_dirty_bitmap_bytes(memslot);

        return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
        u64 ind_addr;
        u64 summary_addr;
        u64 ind_offset;
        u32 summary_offset;
        u32 adapter_id;
};

struct kvm_hv_sint {
        u32 vcpu;
        u32 sint;
};

struct kvm_xen_evtchn {
        u32 port;
        u32 vcpu_id;
        int vcpu_idx;
        u32 priority;
};

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct {
                        u32 address_lo;
                        u32 address_hi;
                        u32 data;
                        u32 flags;
                        u32 devid;
                } msi;
                struct kvm_s390_adapter_int adapter;
                struct kvm_hv_sint hv_sint;
                struct kvm_xen_evtchn xen_evtchn;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi.  Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[] __counted_by(nr_rt_entries);
};
#endif

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);

#ifndef KVM_INTERNAL_MEM_SLOTS
#define KVM_INTERNAL_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)

#if KVM_MAX_NR_ADDRESS_SPACES == 1
static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
{
        return KVM_MAX_NR_ADDRESS_SPACES;
}

static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

/*
 * Arch code must define kvm_arch_has_private_mem if support for private memory
 * is enabled.
 */
#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
{
        return false;
}
#endif

struct kvm_memslots {
        u64 generation;
        atomic_long_t last_used_slot;
        struct rb_root_cached hva_tree;
        struct rb_root gfn_tree;
        /*
         * The mapping table from slot id to memslot.
         *
         * 7-bit bucket count matches the size of the old id to index array for
         * 512 slots, while giving good performance with this slot count.
         * Higher bucket counts bring only small performance improvements but
         * always result in higher memory usage (even for lower memslot counts).
         */
        DECLARE_HASHTABLE(id_hash, 7);
        int node_idx;
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
        rwlock_t mmu_lock;
#else
        spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */

        struct mutex slots_lock;

        /*
         * Protects the arch-specific fields of struct kvm_memory_slots in
         * use by the VM.  To be used under the slots_lock (above) or in a
         * kvm->srcu critical section where acquiring the slots_lock would
         * lead to deadlock with the synchronize_srcu in
         * kvm_swap_active_memslots().
         */
        struct mutex slots_arch_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        unsigned long nr_memslot_pages;
        /* The two memslot sets - active and inactive (per address space) */
        struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
        /* The current active memslot set for each address space */
        struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
        struct xarray vcpu_array;
        /*
         * Protected by slots_lock, but can be read outside if an
         * incorrect answer is acceptable.
         */
        atomic_t nr_memslots_dirty_logging;

        /* Used to wait for completion of MMU notifiers. */
        spinlock_t mn_invalidate_lock;
        unsigned long mn_active_invalidate_count;
        struct rcuwait mn_memslots_update_rcuwait;

        /* For management / invalidation of gfn_to_pfn_caches */
        spinlock_t gpc_lock;
        struct list_head gpc_list;

        /*
         * created_vcpus is protected by kvm->lock, and is incremented
         * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
         * incremented after storing the kvm_vcpu pointer in vcpus,
         * and is accessed atomically.
         */
        atomic_t online_vcpus;
        int max_vcpus;
        int created_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        struct {
                spinlock_t lock;
                struct list_head items;
                /* resampler_list update side is protected by resampler_lock. */
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
#endif
        struct list_head ioeventfds;
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;

        struct hlist_head irq_ack_notifier_list;
#endif

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_invalidate_seq;
        long mmu_invalidate_in_progress;
        gfn_t mmu_invalidate_range_start;
        gfn_t mmu_invalidate_range_end;
#endif
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
        struct kvm_stat_data **debugfs_stat_data;
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
        pid_t userspace_pid;
        bool override_halt_poll_ns;
        unsigned int max_halt_poll_ns;
        u32 dirty_ring_size;
        bool dirty_ring_with_bitmap;
        bool vm_bugged;
        bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
        struct notifier_block pm_notifier;
#endif
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
        /* Protected by slots_lock (for writes) and RCU (for reads) */
        struct xarray mem_attr_array;
#endif
        char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
                             ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
                        (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
                              ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
        kvm->vm_dead = true;
        kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
        kvm->vm_bugged = true;
        kvm_vm_dead(kvm);
}

#define KVM_BUG(cond, kvm, fmt...)				\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

#define KVM_BUG_ON(cond, kvm)					\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

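/*
 * Hedged usage sketch: both macros evaluate to the (unlikely) condition,
 * so callers can guard "impossible" states and bail out, e.g.:
 *
 *	if (KVM_BUG_ON(vcpu->mode == IN_GUEST_MODE, kvm))
 *		return -EIO;
 *
 * The first violation WARNs and marks the VM bugged (and dead); checks on
 * an already-bugged VM stay silent but still report the condition.
 */
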
/*
 * Note, "data corruption" refers to corruption of host kernel data structures,
 * not guest data.  Guest data corruption, suspected or confirmed, that is tied
 * and contained to a single VM should *never* BUG() and potentially panic the
 * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure
 * is corrupted and that corruption can have a cascading effect to other parts
 * of the host and/or to other VMs.
 */
#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm)			\
({								\
	bool __ret = !!(cond);					\
								\
	if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION))		\
		BUG_ON(__ret);					\
	else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))	\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
        WARN_ONCE(vcpu->srcu_depth++,
                  "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
#endif
        vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
{
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);

#ifdef CONFIG_PROVE_RCU
        WARN_ONCE(--vcpu->srcu_depth,
                  "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
#endif
}

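/*
 * Hedged usage sketch: bracket memslot accesses on the vCPU ioctl path
 * with the helpers above, e.g.:
 *
 *	kvm_vcpu_srcu_read_lock(vcpu);
 *	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 *	...
 *	kvm_vcpu_srcu_read_unlock(vcpu);
 *
 * Under CONFIG_PROVE_RCU the depth counter catches unbalanced or nested
 * use of the single per-vCPU srcu index.
 */
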
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        int num_vcpus = atomic_read(&kvm->online_vcpus);

        i = array_index_nospec(i, num_vcpus);

        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return xa_load(&kvm->vcpu_array, i);
}

#define kvm_for_each_vcpu(idx, vcpup, kvm)		   \
	xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
			  (atomic_read(&kvm->online_vcpus) - 1))

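/*
 * Hedged usage sketch: the iterator visits every online vCPU, e.g.:
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 *
 * Note the index must be an unsigned long, as required by
 * xa_for_each_range().
 */
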
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        unsigned long i;

        if (id < 0)
                return NULL;
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}

void kvm_destroy_vcpus(struct kvm *kvm);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
        as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}

static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
{
        return RB_EMPTY_ROOT(&slots->gfn_tree);
}

bool kvm_are_all_memslots_empty(struct kvm *kvm);

#define kvm_for_each_memslot(memslot, bkt, slots)			       \
	hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
		if (WARN_ON_ONCE(!memslot->npages)) {			       \
		} else

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
        struct kvm_memory_slot *slot;
        int idx = slots->node_idx;

        hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
                if (slot->id == id)
                        return slot;
        }

        return NULL;
}

/* Iterator used for walking memslots that overlap a gfn range. */
struct kvm_memslot_iter {
        struct kvm_memslots *slots;
        struct rb_node *node;
        struct kvm_memory_slot *slot;
};

static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
{
        iter->node = rb_next(iter->node);
        if (!iter->node)
                return;

        iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
}

static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
                                          struct kvm_memslots *slots,
                                          gfn_t start)
{
        int idx = slots->node_idx;
        struct rb_node *tmp;
        struct kvm_memory_slot *slot;

        iter->slots = slots;

        /*
         * Find the so called "upper bound" of a key - the first node that has
         * its key strictly greater than the searched one (the start gfn in our case).
         */
        iter->node = NULL;
        for (tmp = slots->gfn_tree.rb_node; tmp; ) {
                slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
                if (start < slot->base_gfn) {
                        iter->node = tmp;
                        tmp = tmp->rb_left;
                } else {
                        tmp = tmp->rb_right;
                }
        }

        /*
         * Find the slot with the lowest gfn that can possibly intersect with
         * the range, so we'll ideally have slot start <= range start.
         */
        if (iter->node) {
                /*
                 * A NULL previous node means that the very first slot
                 * already has a higher start gfn.
                 * In this case slot start > range start.
                 */
                tmp = rb_prev(iter->node);
                if (tmp)
                        iter->node = tmp;
        } else {
                /* a NULL node below means no slots */
                iter->node = rb_last(&slots->gfn_tree);
        }

        if (iter->node) {
                iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);

                /*
                 * It is possible in the slot start < range start case that the
                 * found slot ends before or at range start (slot end <= range start)
                 * and so it does not overlap the requested range.
                 *
                 * In such non-overlapping case the next slot (if it exists) will
                 * already have slot start > range start, otherwise the logic above
                 * would have found it instead of the current slot.
                 */
                if (iter->slot->base_gfn + iter->slot->npages <= start)
                        kvm_memslot_iter_next(iter);
        }
}

static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
{
        if (!iter->node)
                return false;

        /*
         * If this slot starts beyond or at the end of the range so does
         * every next one.
         */
        return iter->slot->base_gfn < end;
}

/* Iterate over each memslot at least partially intersecting [start, end) range */
#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end)	\
	for (kvm_memslot_iter_start(iter, slots, start);		\
	     kvm_memslot_iter_is_valid(iter, end);			\
	     kvm_memslot_iter_next(iter))

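/*
 * Hedged usage sketch: walking every memslot that overlaps a gfn range,
 * e.g. when checking a new slot for overlaps:
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *		...
 *	}
 */
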
/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          const struct kvm_userspace_memory_region2 *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   const struct kvm_memory_slot *old,
                                   struct kvm_memory_slot *new,
                                   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                            struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
                                      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                          bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
                               bool atomic, bool interruptible, bool *async,
                               bool write_fault, bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                 void *data, unsigned int offset,
                                 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  void *data, unsigned int offset,
                                  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = get_user(v, __uaddr);				\
	__ret;								\
})

#define kvm_get_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

#define __kvm_put_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(v, __uaddr);				\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

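/*
 * Hedged usage sketch: read-modify-write a guest variable through the
 * accessors above.  Both return 0 on success and -EFAULT otherwise, and
 * kvm_put_guest() marks the page dirty on success:
 *
 *	u32 val;
 *
 *	if (!kvm_get_guest(kvm, gpa, val)) {
 *		val |= EXAMPLE_FLAG;	// EXAMPLE_FLAG is hypothetical
 *		ret = kvm_put_guest(kvm, gpa, val);
 *	}
 */
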
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
                             int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                               unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                        unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
                              int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

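/*
 * Hedged usage sketch for kvm_vcpu_map()/kvm_vcpu_unmap() above: map a
 * guest page, access it via the kernel mapping, then unmap and mark it
 * dirty (callers conventionally pass a gfn via gpa_to_gfn()):
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 *		return -EFAULT;
 *	memcpy(map.hva + offset_in_page(gpa), src, len);
 *	kvm_vcpu_unmap(vcpu, &map, true);
 */
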
982ed0de | 1317 | /** |
52491a38 ML |
1318 | * kvm_gpc_init - initialize gfn_to_pfn_cache. |
1319 | * | |
1320 | * @gpc: struct gfn_to_pfn_cache object. | |
982ed0de | 1321 | * @kvm: pointer to kvm instance. |
8c82a0b3 ML |
1322 | * |
1323 | * This sets up a gfn_to_pfn_cache by initializing locks and assigning the | |
1324 | * immutable attributes. Note, the cache must be zero-allocated (or zeroed by | |
1325 | * the caller before init). | |
1326 | */ | |
a4bff3df | 1327 | void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm); |
8c82a0b3 ML |
1328 | |
1329 | /** | |
1330 | * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest | |
1331 | * physical address. | |
1332 | * | |
1333 | * @gpc: struct gfn_to_pfn_cache object. | |
982ed0de DW |
1334 | * @gpa: guest physical address to map. |
1335 | * @len: sanity check; the range being access must fit a single page. | |
982ed0de DW |
1336 | * |
1337 | * @return: 0 for success. | |
1338 | * -EINVAL for a mapping which would cross a page boundary. | |
8c82a0b3 | 1339 | * -EFAULT for an untranslatable guest physical address. |
982ed0de | 1340 | * |
8c82a0b3 | 1341 | * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for |
aba3caef ML |
1342 | * invalidations to be processed. Callers are required to use kvm_gpc_check() |
1343 | * to ensure that the cache is valid before accessing the target page. | |
982ed0de | 1344 | */ |
8c82a0b3 | 1345 | int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len); |
982ed0de | 1346 | |
721f5b0d PD |
1347 | /** |
1348 | * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA. | |
1349 | * | |
1350 | * @gpc: struct gfn_to_pfn_cache object. | |
1351 | * @hva: userspace virtual address to map. | |
1352 | * @len: sanity check; the range being accessed must fit in a single page. |
1353 | * | |
1354 | * @return: 0 for success. | |
1355 | * -EINVAL for a mapping which would cross a page boundary. | |
1356 | * -EFAULT for an unmappable userspace virtual address. |
1357 | * | |
1358 | * The semantics of this function are the same as those of kvm_gpc_activate(). It | |
1359 | * merely bypasses a layer of address translation. | |
1360 | */ | |
1361 | int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len); | |
1362 | ||
982ed0de | 1363 | /** |
aba3caef | 1364 | * kvm_gpc_check - check validity of a gfn_to_pfn_cache. |
982ed0de | 1365 | * |
982ed0de | 1366 | * @gpc: struct gfn_to_pfn_cache object. |
982ed0de | 1367 | * @len: sanity check; the range being accessed must fit in a single page. |
982ed0de DW |
1368 | * |
1369 | * @return: %true if the cache is still valid and the address matches. | |
1370 | * %false if the cache is not valid. | |
1371 | * | |
1372 | * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock | |
1373 | * while calling this function, and then continue to hold the lock until the | |
1374 | * access is complete. | |
1375 | * | |
1376 | * Callers in IN_GUEST_MODE may do so without locking, although they should | |
1377 | * still hold a read lock on kvm->srcu for the memslot checks. |
1378 | */ | |
58f5ee5f | 1379 | bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len); |
982ed0de DW |
1380 | |
1381 | /** | |
aba3caef | 1382 | * kvm_gpc_refresh - update a previously initialized cache. |
982ed0de | 1383 | * |
982ed0de | 1384 | * @gpc: struct gfn_to_pfn_cache object. |
982ed0de | 1385 | * @len: sanity check; the range being accessed must fit in a single page. |
982ed0de DW |
1386 | * |
1387 | * @return: 0 for success. | |
1388 | * -EINVAL for a mapping which would cross a page boundary. | |
0318f207 | 1389 | * -EFAULT for an untranslatable guest physical address. |
982ed0de DW |
1390 | * |
1391 | * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful | |
0318f207 | 1392 | * return from this function does not mean the page can be immediately |
982ed0de DW |
1393 | * accessed because it may have raced with an invalidation. Callers must |
1394 | * still lock and check the cache status, as this function does not return | |
1395 | * with the lock still held to permit access. | |
1396 | */ | |
58f5ee5f | 1397 | int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len); |
982ed0de DW |
1398 | |
1399 | /** | |
52491a38 | 1400 | * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache. |
982ed0de | 1401 | * |
982ed0de DW |
1402 | * @gpc: struct gfn_to_pfn_cache object. |
1403 | * | |
8c82a0b3 | 1404 | * This removes a cache from the VM's list of caches to be processed on MMU notifier |
982ed0de DW |
1405 | * invocation. |
1406 | */ | |
8c82a0b3 | 1407 | void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc); |
982ed0de | 1408 | |
721f5b0d PD |
1409 | static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc) |
1410 | { | |
1411 | return gpc->active && !kvm_is_error_gpa(gpc->gpa); | |
1412 | } | |
1413 | ||
1414 | static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc) | |
1415 | { | |
1416 | return gpc->active && kvm_is_error_gpa(gpc->gpa); | |
1417 | } | |
1418 | ||
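/*
 * A minimal usage sketch of the gfn_to_pfn_cache API documented above
 * (illustrative, not part of this header's API: the helper name and the
 * u64-sized payload are assumptions).  It follows the documented contract:
 * check under a read lock on @gpc->lock, refresh outside it, retry.
 */
static inline int kvm_gpc_read_u64_sketch(struct gfn_to_pfn_cache *gpc,
					  gpa_t gpa, u64 *val)
{
	unsigned long flags;
	int ret;

	/* Fails with -EINVAL if gpa..gpa+7 crosses a page boundary. */
	ret = kvm_gpc_activate(gpc, gpa, sizeof(*val));
	if (ret)
		return ret;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(*val))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Raced with an invalidation; re-map and try again. */
		ret = kvm_gpc_refresh(gpc, sizeof(*val));
		if (ret)
			return ret;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* gpc->khva already includes the page offset of gpc->gpa. */
	*val = *(u64 *)gpc->khva;
	read_unlock_irqrestore(&gpc->lock, flags);

	/* The cache stays linked until kvm_gpc_deactivate() at teardown. */
	return 0;
}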
20b7035c JS |
1419 | void kvm_sigset_activate(struct kvm_vcpu *vcpu); |
1420 | void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); | |
1421 | ||
91b99ea7 | 1422 | void kvm_vcpu_halt(struct kvm_vcpu *vcpu); |
fac42688 | 1423 | bool kvm_vcpu_block(struct kvm_vcpu *vcpu); |
3217f7c2 CD |
1424 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); |
1425 | void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); | |
178f02ff | 1426 | bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); |
b6d33834 | 1427 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
fa93384f | 1428 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
b9926482 | 1429 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode); |
a4ee1ca4 | 1430 | |
d9e368d6 | 1431 | void kvm_flush_remote_tlbs(struct kvm *kvm); |
d4788996 | 1432 | void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); |
619b5072 DM |
1433 | void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, |
1434 | const struct kvm_memory_slot *memslot); | |
7053df4e | 1435 | |
6926f95a SC |
1436 | #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE |
1437 | int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); | |
837f66c7 | 1438 | int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min); |
6926f95a SC |
1439 | int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); |
1440 | void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); | |
1441 | void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); | |
1442 | #endif | |
1443 | ||
8569992d CP |
1444 | void kvm_mmu_invalidate_begin(struct kvm *kvm); |
1445 | void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end); | |
1446 | void kvm_mmu_invalidate_end(struct kvm *kvm); | |
a7800aa8 | 1447 | bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); |
edb298c6 | 1448 | |
043405e1 CO |
1449 | long kvm_arch_dev_ioctl(struct file *filp, |
1450 | unsigned int ioctl, unsigned long arg); | |
313a3dc7 CO |
1451 | long kvm_arch_vcpu_ioctl(struct file *filp, |
1452 | unsigned int ioctl, unsigned long arg); | |
1499fa80 | 1453 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); |
018d00d2 | 1454 | |
784aa3d7 | 1455 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); |
018d00d2 | 1456 | |
3b0f1d01 | 1457 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
ba0513b5 MS |
1458 | struct kvm_memory_slot *slot, |
1459 | gfn_t gfn_offset, | |
1460 | unsigned long mask); | |
0dff0846 SC |
1461 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); |
1462 | ||
619b5072 | 1463 | #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
0dff0846 SC |
1464 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); |
1465 | int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, | |
2a49f61d | 1466 | int *is_dirty, struct kvm_memory_slot **memslot); |
0dff0846 | 1467 | #endif |
5bb064dc | 1468 | |
aa2fbe6d YZ |
1469 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
1470 | bool line_status); | |
e5d83c74 PB |
1471 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
1472 | struct kvm_enable_cap *cap); | |
d8708b80 | 1473 | int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
ed51862f AG |
1474 | long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, |
1475 | unsigned long arg); | |
313a3dc7 | 1476 | |
d0752060 HB |
1477 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
1478 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); | |
1479 | ||
8b006791 ZX |
1480 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1481 | struct kvm_translation *tr); | |
1482 | ||
b6c7a5dc HB |
1483 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
1484 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); | |
1485 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |
1486 | struct kvm_sregs *sregs); | |
1487 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |
1488 | struct kvm_sregs *sregs); | |
62d9f0db MT |
1489 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
1490 | struct kvm_mp_state *mp_state); | |
1491 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |
1492 | struct kvm_mp_state *mp_state); | |
d0bfb940 JK |
1493 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
1494 | struct kvm_guest_debug *dbg); | |
1b94f6f8 | 1495 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); |
b6c7a5dc | 1496 | |
e790d9ef RK |
1497 | void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); |
1498 | ||
e9b11c17 ZX |
1499 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); |
1500 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); | |
897cc38e | 1501 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); |
e529ef66 | 1502 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); |
31928aa5 | 1503 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); |
d40ccc62 | 1504 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); |
e9b11c17 | 1505 | |
2fdef3a2 SS |
1506 | #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER |
1507 | int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state); | |
1508 | #endif | |
1509 | ||
741cbbae | 1510 | #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS |
d56f5136 | 1511 | void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); |
e36de87d VP |
1512 | #else |
1513 | static inline void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) {} |
741cbbae | 1514 | #endif |
235539b4 | 1515 | |
441f7bfa | 1516 | #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING |
13a34e06 RK |
1517 | int kvm_arch_hardware_enable(void); |
1518 | void kvm_arch_hardware_disable(void); | |
441f7bfa | 1519 | #endif |
1d737c8a | 1520 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
199b5763 | 1521 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); |
b6d33834 | 1522 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); |
17e433b5 | 1523 | bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); |
52acd22f | 1524 | bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); |
77bcd9e6 | 1525 | bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu); |
d970a325 PB |
1526 | int kvm_arch_post_init_vm(struct kvm *kvm); |
1527 | void kvm_arch_pre_destroy_vm(struct kvm *kvm); | |
284851ee | 1528 | void kvm_arch_create_vm_debugfs(struct kvm *kvm); |
e9b11c17 | 1529 | |
d89f5eff | 1530 | #ifndef __KVM_HAVE_ARCH_VM_ALLOC |
d1e5b0e9 MO |
1531 | /* |
1532 | * All architectures that want to use vzalloc currently also | |
1533 | * need their own kvm_arch_alloc_vm implementation. | |
1534 | */ | |
d89f5eff JK |
1535 | static inline struct kvm *kvm_arch_alloc_vm(void) |
1536 | { | |
b1cd1633 | 1537 | return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT); |
d89f5eff | 1538 | } |
78b497f2 JG |
1539 | #endif |
1540 | ||
1541 | static inline void __kvm_arch_free_vm(struct kvm *kvm) | |
1542 | { | |
1543 | kvfree(kvm); | |
1544 | } | |
d89f5eff | 1545 | |
78b497f2 | 1546 | #ifndef __KVM_HAVE_ARCH_VM_FREE |
d89f5eff JK |
1547 | static inline void kvm_arch_free_vm(struct kvm *kvm) |
1548 | { | |
78b497f2 | 1549 | __kvm_arch_free_vm(kvm); |
d89f5eff JK |
1550 | } |
1551 | #endif | |
1552 | ||
a1342c80 DM |
1553 | #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS |
1554 | static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) | |
b08660e5 TL |
1555 | { |
1556 | return -EOPNOTSUPP; |
1557 | } | |
cfb0c08e RRA |
1558 | #else |
1559 | int kvm_arch_flush_remote_tlbs(struct kvm *kvm); | |
b08660e5 TL |
1560 | #endif |
1561 | ||
d4788996 DM |
1562 | #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE |
1563 | static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, | |
1564 | gfn_t gfn, u64 nr_pages) | |
1565 | { | |
1566 | return -EOPNOTSUPP; | |
1567 | } | |
1568 | #else | |
1569 | int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); | |
1570 | #endif | |
1571 | ||
e0f0bbc5 AW |
1572 | #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA |
1573 | void kvm_arch_register_noncoherent_dma(struct kvm *kvm); | |
1574 | void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); | |
1575 | bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); | |
1576 | #else | |
1577 | static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) | |
1578 | { | |
1579 | } | |
1580 | ||
1581 | static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) | |
1582 | { | |
1583 | } | |
1584 | ||
1585 | static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) | |
1586 | { | |
1587 | return false; | |
1588 | } | |
1589 | #endif | |
5544eb9b PB |
1590 | #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE |
1591 | void kvm_arch_start_assignment(struct kvm *kvm); | |
1592 | void kvm_arch_end_assignment(struct kvm *kvm); | |
1593 | bool kvm_arch_has_assigned_device(struct kvm *kvm); | |
1594 | #else | |
1595 | static inline void kvm_arch_start_assignment(struct kvm *kvm) | |
1596 | { | |
1597 | } | |
1598 | ||
1599 | static inline void kvm_arch_end_assignment(struct kvm *kvm) | |
1600 | { | |
1601 | } | |
1602 | ||
742ab6df | 1603 | static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) |
5544eb9b PB |
1604 | { |
1605 | return false; | |
1606 | } | |
1607 | #endif | |
e0f0bbc5 | 1608 | |
da4ad88c | 1609 | static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) |
b6d33834 | 1610 | { |
2246f8b5 | 1611 | #ifdef __KVM_HAVE_ARCH_WQP |
da4ad88c | 1612 | return vcpu->arch.waitp; |
2246f8b5 | 1613 | #else |
da4ad88c | 1614 | return &vcpu->wait; |
b6d33834 | 1615 | #endif |
2246f8b5 | 1616 | } |
b6d33834 | 1617 | |
d92a5d1c SC |
1618 | /* |
1619 | * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns | |
1620 | * true if the vCPU was blocking and was awakened, false otherwise. | |
1621 | */ | |
1622 | static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) | |
1623 | { | |
1624 | return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); | |
1625 | } | |
1626 | ||
1627 | static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu) | |
1628 | { | |
1629 | return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)); | |
1630 | } | |
1631 | ||
01c94e64 EA |
1632 | #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED |
1633 | /* | |
1634 | * Returns true if the virtual interrupt controller is initialized and |
1635 | * ready to accept virtual IRQs. On some architectures the virtual interrupt |
1636 | * controller is dynamically instantiated and this is not always true. | |
1637 | */ | |
1638 | bool kvm_arch_intc_initialized(struct kvm *kvm); | |
1639 | #else | |
1640 | static inline bool kvm_arch_intc_initialized(struct kvm *kvm) | |
1641 | { | |
1642 | return true; | |
1643 | } | |
1644 | #endif | |
1645 | ||
e1bfc245 SC |
1646 | #ifdef CONFIG_GUEST_PERF_EVENTS |
1647 | unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu); | |
1648 | ||
1649 | void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)); | |
1650 | void kvm_unregister_perf_callbacks(void); | |
1651 | #else | |
1652 | static inline void kvm_register_perf_callbacks(void *ign) {} | |
1653 | static inline void kvm_unregister_perf_callbacks(void) {} | |
1654 | #endif /* CONFIG_GUEST_PERF_EVENTS */ | |
1655 | ||
e08b9637 | 1656 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
d19a9cd2 | 1657 | void kvm_arch_destroy_vm(struct kvm *kvm); |
ad8ba2cd | 1658 | void kvm_arch_sync_events(struct kvm *kvm); |
e9b11c17 | 1659 | |
3d80840d | 1660 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
682c59a3 | 1661 | |
b14b2690 | 1662 | struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn); |
284dc493 | 1663 | bool kvm_is_zone_device_page(struct page *page); |
c77fb9dc | 1664 | |
62c476c7 BAY |
1665 | struct kvm_irq_ack_notifier { |
1666 | struct hlist_node link; | |
1667 | unsigned gsi; | |
1668 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); | |
1669 | }; | |
1670 | ||
9957c86d PM |
1671 | int kvm_irq_map_gsi(struct kvm *kvm, |
1672 | struct kvm_kernel_irq_routing_entry *entries, int gsi); | |
1673 | int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); | |
8ba918d4 | 1674 | |
aa2fbe6d YZ |
1675 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, |
1676 | bool line_status); | |
bd2b53b2 | 1677 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, |
aa2fbe6d | 1678 | int irq_source_id, int level, bool line_status); |
b97e6de9 PB |
1679 | int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, |
1680 | struct kvm *kvm, int irq_source_id, | |
1681 | int level, bool line_status); | |
c7c9c56c | 1682 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); |
ba1aefcd | 1683 | void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); |
44882eed | 1684 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
3de42dc0 XZ |
1685 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
1686 | struct kvm_irq_ack_notifier *kian); | |
fa40a821 MT |
1687 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, |
1688 | struct kvm_irq_ack_notifier *kian); | |
5550af4d SY |
1689 | int kvm_request_irq_source_id(struct kvm *kvm); |
1690 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | |
cdc238eb | 1691 | bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); |
62c476c7 | 1692 | |
9d4cba7f | 1693 | /* |
a54d8066 | 1694 | * Returns a pointer to the memslot if it contains gfn. |
0f22af94 DM |
1695 | * Otherwise returns NULL. |
1696 | */ | |
1697 | static inline struct kvm_memory_slot * | |
a54d8066 | 1698 | try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn) |
0f22af94 | 1699 | { |
a54d8066 | 1700 | if (!slot) |
0f22af94 DM |
1701 | return NULL; |
1702 | ||
0f22af94 DM |
1703 | if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) |
1704 | return slot; | |
1705 | else | |
1706 | return NULL; | |
1707 | } | |
1708 | ||
1709 | /* | |
a54d8066 | 1710 | * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL. |
0577d1ab | 1711 | * |
c928bfc2 MS |
1712 | * With "approx" set, a memslot is returned even when the address falls |
1713 | * in a hole; in that case, one of the memslots bordering the hole is |
1714 | * returned. |
9d4cba7f PM |
1715 | */ |
1716 | static inline struct kvm_memory_slot * | |
a54d8066 | 1717 | search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx) |
9d4cba7f | 1718 | { |
0f22af94 | 1719 | struct kvm_memory_slot *slot; |
a54d8066 MS |
1720 | struct rb_node *node; |
1721 | int idx = slots->node_idx; | |
1722 | ||
1723 | slot = NULL; | |
1724 | for (node = slots->gfn_tree.rb_node; node; ) { | |
1725 | slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]); | |
1726 | if (gfn >= slot->base_gfn) { | |
1727 | if (gfn < slot->base_gfn + slot->npages) | |
1728 | return slot; | |
1729 | node = node->rb_right; | |
1730 | } else | |
1731 | node = node->rb_left; | |
c928bfc2 | 1732 | } |
9d4cba7f | 1733 | |
a54d8066 | 1734 | return approx ? slot : NULL; |
9d4cba7f PM |
1735 | } |
1736 | ||
1737 | static inline struct kvm_memory_slot * | |
c928bfc2 | 1738 | ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx) |
9d4cba7f | 1739 | { |
0f22af94 | 1740 | struct kvm_memory_slot *slot; |
0f22af94 | 1741 | |
a54d8066 MS |
1742 | slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot); |
1743 | slot = try_get_memslot(slot, gfn); | |
0f22af94 DM |
1744 | if (slot) |
1745 | return slot; | |
1746 | ||
a54d8066 | 1747 | slot = search_memslots(slots, gfn, approx); |
0f22af94 | 1748 | if (slot) { |
a54d8066 | 1749 | atomic_long_set(&slots->last_used_slot, (unsigned long)slot); |
0f22af94 DM |
1750 | return slot; |
1751 | } | |
1752 | ||
1753 | return NULL; | |
9d4cba7f PM |
1754 | } |
1755 | ||
c928bfc2 MS |
1756 | /* |
1757 | * __gfn_to_memslot() and its descendants are here to allow arch code to inline | |
1758 | * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline | |
1759 | * because that would bloat other code too much. | |
1760 | */ | |
1761 | static inline struct kvm_memory_slot * | |
1762 | __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) | |
1763 | { | |
1764 | return ____gfn_to_memslot(slots, gfn, false); | |
1765 | } | |
1766 | ||
66a03505 | 1767 | static inline unsigned long |
8ca6f063 | 1768 | __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) |
66a03505 | 1769 | { |
da27a83f PB |
1770 | /* |
1771 | * The index was originally checked in search_memslots. To prevent |
1772 | * a malicious guest from building a Spectre gadget out of e.g. page |
1773 | * table walks, do not let the processor speculate loads outside | |
1774 | * the guest's registered memslots. | |
1775 | */ | |
4422829e PB |
1776 | unsigned long offset = gfn - slot->base_gfn; |
1777 | offset = array_index_nospec(offset, slot->npages); | |
da27a83f | 1778 | return slot->userspace_addr + offset * PAGE_SIZE; |
66a03505 GS |
1779 | } |
1780 | ||
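/*
 * A short sketch (the function name is an assumption) of how the inline
 * helpers above combine to translate a gfn into a host virtual address.
 * The caller must hold kvm->srcu or slots_lock so the memslots stay live;
 * kvm_memslots() and KVM_HVA_ERR_BAD are defined earlier in this header.
 */
static inline unsigned long gfn_to_hva_sketch(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);

	if (!slot)
		return KVM_HVA_ERR_BAD;	/* no memslot covers this gfn */

	return __gfn_to_hva_memslot(slot, gfn);
}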
0ee8dcb8 XG |
1781 | static inline int memslot_id(struct kvm *kvm, gfn_t gfn) |
1782 | { | |
1783 | return gfn_to_memslot(kvm, gfn)->id; | |
1784 | } | |
1785 | ||
d19a748b TY |
1786 | static inline gfn_t |
1787 | hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) | |
887c08ac | 1788 | { |
d19a748b TY |
1789 | gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; |
1790 | ||
1791 | return slot->base_gfn + gfn_offset; | |
887c08ac XG |
1792 | } |
1793 | ||
1755fbcc AK |
1794 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
1795 | { | |
1796 | return (gpa_t)gfn << PAGE_SHIFT; | |
1797 | } | |
6aa8b732 | 1798 | |
c30a358d JR |
1799 | static inline gfn_t gpa_to_gfn(gpa_t gpa) |
1800 | { | |
1801 | return (gfn_t)(gpa >> PAGE_SHIFT); | |
1802 | } | |
1803 | ||
ba049e93 | 1804 | static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) |
62c476c7 BAY |
1805 | { |
1806 | return (hpa_t)pfn << PAGE_SHIFT; | |
1807 | } | |
1808 | ||
9e7325ac | 1809 | static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa) |
dfeec843 HC |
1810 | { |
1811 | unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); | |
1812 | ||
9e7325ac | 1813 | return !kvm_is_error_hva(hva); |
dfeec843 HC |
1814 | } |
1815 | ||
78b74638 PD |
1816 | static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc) |
1817 | { | |
1818 | lockdep_assert_held(&gpc->lock); | |
1819 | ||
1820 | if (!gpc->memslot) | |
1821 | return; | |
1822 | ||
1823 | mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa)); | |
dfeec843 HC |
1824 | } |
1825 | ||
ba1389b7 AK |
1826 | enum kvm_stat_kind { |
1827 | KVM_STAT_VM, | |
1828 | KVM_STAT_VCPU, | |
1829 | }; | |
1830 | ||
536a6f88 | 1831 | struct kvm_stat_data { |
536a6f88 | 1832 | struct kvm *kvm; |
bc9e9e67 | 1833 | const struct _kvm_stats_desc *desc; |
ba1389b7 | 1834 | enum kvm_stat_kind kind; |
417bc304 | 1835 | }; |
09cbcef6 | 1836 | |
cb082bfa JZ |
1837 | struct _kvm_stats_desc { |
1838 | struct kvm_stats_desc desc; | |
1839 | char name[KVM_STATS_NAME_SIZE]; | |
1840 | }; | |
1841 | ||
f95937cc | 1842 | #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ |
cb082bfa JZ |
1843 | .flags = type | unit | base | \ |
1844 | BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ | |
1845 | BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ | |
1846 | BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ | |
1847 | .exponent = exp, \ | |
f95937cc JZ |
1848 | .size = sz, \ |
1849 | .bucket_size = bsz | |
cb082bfa | 1850 | |
f95937cc | 1851 | #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
cb082bfa JZ |
1852 | { \ |
1853 | { \ | |
f95937cc | 1854 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
cb082bfa JZ |
1855 | .offset = offsetof(struct kvm_vm_stat, generic.stat) \ |
1856 | }, \ | |
1857 | .name = #stat, \ | |
1858 | } | |
f95937cc | 1859 | #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
cb082bfa JZ |
1860 | { \ |
1861 | { \ | |
f95937cc | 1862 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
cb082bfa JZ |
1863 | .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ |
1864 | }, \ | |
1865 | .name = #stat, \ | |
1866 | } | |
f95937cc | 1867 | #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
cb082bfa JZ |
1868 | { \ |
1869 | { \ | |
f95937cc | 1870 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
cb082bfa JZ |
1871 | .offset = offsetof(struct kvm_vm_stat, stat) \ |
1872 | }, \ | |
1873 | .name = #stat, \ | |
1874 | } | |
f95937cc | 1875 | #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
cb082bfa JZ |
1876 | { \ |
1877 | { \ | |
f95937cc | 1878 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
cb082bfa JZ |
1879 | .offset = offsetof(struct kvm_vcpu_stat, stat) \ |
1880 | }, \ | |
1881 | .name = #stat, \ | |
1882 | } | |
1883 | /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ | |
f95937cc JZ |
1884 | #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ |
1885 | SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) | |
cb082bfa JZ |
1886 | |
1887 | #define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \ | |
f95937cc JZ |
1888 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \ |
1889 | unit, base, exponent, 1, 0) | |
cb082bfa | 1890 | #define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \ |
f95937cc JZ |
1891 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \ |
1892 | unit, base, exponent, 1, 0) | |
cb082bfa | 1893 | #define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \ |
f95937cc JZ |
1894 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \ |
1895 | unit, base, exponent, 1, 0) | |
1896 | #define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \ | |
1897 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \ | |
1898 | unit, base, exponent, sz, bsz) | |
1899 | #define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \ | |
1900 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \ | |
1901 | unit, base, exponent, sz, 0) | |
cb082bfa JZ |
1902 | |
1903 | /* Cumulative counter, read/write */ | |
1904 | #define STATS_DESC_COUNTER(SCOPE, name) \ | |
1905 | STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \ | |
1906 | KVM_STATS_BASE_POW10, 0) | |
1907 | /* Instantaneous counter, read only */ | |
1908 | #define STATS_DESC_ICOUNTER(SCOPE, name) \ | |
1909 | STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \ | |
1910 | KVM_STATS_BASE_POW10, 0) | |
1911 | /* Peak counter, read/write */ | |
1912 | #define STATS_DESC_PCOUNTER(SCOPE, name) \ | |
1913 | STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \ | |
1914 | KVM_STATS_BASE_POW10, 0) | |
1915 | ||
1b870fa5 PB |
1916 | /* Instantaneous boolean value, read only */ |
1917 | #define STATS_DESC_IBOOLEAN(SCOPE, name) \ | |
1918 | STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ | |
1919 | KVM_STATS_BASE_POW10, 0) | |
1920 | /* Peak (sticky) boolean value, read/write */ | |
1921 | #define STATS_DESC_PBOOLEAN(SCOPE, name) \ | |
1922 | STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ | |
1923 | KVM_STATS_BASE_POW10, 0) | |
1924 | ||
cb082bfa JZ |
1925 | /* Cumulative time in nanoseconds */ |
1926 | #define STATS_DESC_TIME_NSEC(SCOPE, name) \ | |
1927 | STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ | |
1928 | KVM_STATS_BASE_POW10, -9) | |
f95937cc JZ |
1929 | /* Linear histogram for time in nanoseconds */ |
1930 | #define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \ | |
1931 | STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ | |
1932 | KVM_STATS_BASE_POW10, -9, sz, bsz) | |
1933 | /* Logarithmic histogram for time in nanoseconds */ |
1934 | #define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \ | |
1935 | STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ | |
1936 | KVM_STATS_BASE_POW10, -9, sz) | |
cb082bfa | 1937 | |
fcfe1bae | 1938 | #define KVM_GENERIC_VM_STATS() \ |
3cc4e148 JZ |
1939 | STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \ |
1940 | STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests) | |
fcfe1bae | 1941 | |
ce55c049 JZ |
1942 | #define KVM_GENERIC_VCPU_STATS() \ |
1943 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \ | |
1944 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \ | |
1945 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \ | |
1946 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \ | |
1947 | STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \ | |
87bcc5fa | 1948 | STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \ |
8ccba534 JZ |
1949 | STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \ |
1950 | STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \ | |
1951 | HALT_POLL_HIST_COUNT), \ | |
1952 | STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \ | |
1953 | HALT_POLL_HIST_COUNT), \ | |
1954 | STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ | |
c3858335 | 1955 | HALT_POLL_HIST_COUNT), \ |
1b870fa5 | 1956 | STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) |
ce55c049 | 1957 | |
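/*
 * A sketch of how an architecture builds its stats descriptor table from
 * the macros above (the arch-specific field names mirror x86's struct
 * kvm_vm_stat but are assumptions here, hence the #if 0 guard).
 */
#if 0
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
	STATS_DESC_ICOUNTER(VM, mmu_unsync),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};
#endif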
76f7c879 | 1958 | extern struct dentry *kvm_debugfs_dir; |
f95937cc | 1959 | |
cb082bfa JZ |
1960 | ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, |
1961 | const struct _kvm_stats_desc *desc, | |
1962 | void *stats, size_t size_stats, | |
1963 | char __user *user_buffer, size_t size, loff_t *offset); | |
f95937cc JZ |
1964 | |
1965 | /** | |
1966 | * kvm_stats_linear_hist_update() - Update bucket value for linear histogram | |
1967 | * statistics data. | |
1968 | * | |
1969 | * @data: start address of the stats data | |
1970 | * @size: the number of buckets in the stats data |
1971 | * @value: the new value used to update the linear histogram's bucket | |
1972 | * @bucket_size: the size (width) of a bucket | |
1973 | */ | |
1974 | static inline void kvm_stats_linear_hist_update(u64 *data, size_t size, | |
1975 | u64 value, size_t bucket_size) | |
1976 | { | |
1977 | size_t index = div64_u64(value, bucket_size); | |
1978 | ||
1979 | index = min(index, size - 1); | |
1980 | ++data[index]; | |
1981 | } | |
1982 | ||
1983 | /** | |
1984 | * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram | |
1985 | * statistics data. | |
1986 | * | |
1987 | * @data: start address of the stats data | |
1988 | * @size: the number of buckets in the stats data |
1989 | * @value: the new value used to update the logarithmic histogram's bucket | |
1990 | */ | |
1991 | static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value) | |
1992 | { | |
1993 | size_t index = fls64(value); | |
1994 | ||
1995 | index = min(index, size - 1); | |
1996 | ++data[index]; | |
1997 | } | |
1998 | ||
1999 | #define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \ | |
2000 | kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize) | |
2001 | #define KVM_STATS_LOG_HIST_UPDATE(array, value) \ | |
2002 | kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value) | |
2003 | ||
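/*
 * Example use of the update macros (illustrative; the helper name is an
 * assumption): push one halt-poll duration, in nanoseconds, into the
 * generic log-2 histogram declared by KVM_GENERIC_VCPU_STATS() above.
 */
static inline void kvm_record_halt_poll_sketch(struct kvm_vcpu *vcpu,
					       u64 poll_ns)
{
	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_poll_success_hist,
				  poll_ns);
}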
2004 | ||
fcfe1bae JZ |
2005 | extern const struct kvm_stats_header kvm_vm_stats_header; |
2006 | extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; | |
ce55c049 JZ |
2007 | extern const struct kvm_stats_header kvm_vcpu_stats_header; |
2008 | extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; | |
d4c9ff2d | 2009 | |
f128cf8c | 2010 | #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER |
20ec3ebd | 2011 | static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) |
e930bffe | 2012 | { |
20ec3ebd | 2013 | if (unlikely(kvm->mmu_invalidate_in_progress)) |
e930bffe AA |
2014 | return 1; |
2015 | /* | |
20ec3ebd CP |
2016 | * Ensure the read of mmu_invalidate_in_progress happens before |
2017 | * the read of mmu_invalidate_seq. This interacts with the | |
2018 | * smp_wmb() in mmu_notifier_invalidate_range_end to make sure | |
2019 | * that the caller either sees the old (non-zero) value of | |
2020 | * mmu_invalidate_in_progress or the new (incremented) value of | |
2021 | * mmu_invalidate_seq. | |
2022 | * | |
2023 | * PowerPC Book3s HV KVM calls this under a per-page lock rather | |
2024 | * than under kvm->mmu_lock, for scalability, so can't rely on | |
2025 | * kvm->mmu_lock to keep things ordered. | |
e930bffe | 2026 | */ |
a355aa54 | 2027 | smp_rmb(); |
20ec3ebd | 2028 | if (kvm->mmu_invalidate_seq != mmu_seq) |
e930bffe AA |
2029 | return 1; |
2030 | return 0; | |
2031 | } | |
4a42d848 | 2032 | |
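/*
 * A sketch of the canonical fault-handler pattern the comment above
 * describes (the function name, the -EAGAIN retry convention, and the
 * rwlock flavour of mmu_lock are assumptions, hence the #if 0 guard).
 */
#if 0
static int page_fault_sketch(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	kvm_pfn_t pfn;

	smp_rmb();	/* order the snapshot before the pfn lookup */

	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* may sleep; mmu_lock not held */

	write_lock(&vcpu->kvm->mmu_lock);
	if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
		write_unlock(&vcpu->kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;		/* pfn may be stale; caller retries */
	}
	/* ... install the mapping under mmu_lock ... */
	write_unlock(&vcpu->kvm->mmu_lock);
	return 0;
}
#endif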
8569992d | 2033 | static inline int mmu_invalidate_retry_gfn(struct kvm *kvm, |
20ec3ebd | 2034 | unsigned long mmu_seq, |
8569992d | 2035 | gfn_t gfn) |
4a42d848 DS |
2036 | { |
2037 | lockdep_assert_held(&kvm->mmu_lock); | |
2038 | /* | |
20ec3ebd CP |
2039 | * If mmu_invalidate_in_progress is non-zero, then the range maintained |
2040 | * by kvm_mmu_notifier_invalidate_range_start contains all addresses | |
2041 | * that might be being invalidated. Note that it may include some false | |
4a42d848 DS |
2042 | * positives, due to shortcuts when handling concurrent invalidations. |
2043 | */ | |
8569992d CP |
2044 | if (unlikely(kvm->mmu_invalidate_in_progress)) { |
2045 | /* | |
2046 | * Dropping mmu_lock after bumping mmu_invalidate_in_progress | |
2047 | * but before updating the range is a KVM bug. | |
2048 | */ | |
2049 | if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA || | |
2050 | kvm->mmu_invalidate_range_end == INVALID_GPA)) | |
2051 | return 1; | |
2052 | ||
2053 | if (gfn >= kvm->mmu_invalidate_range_start && | |
2054 | gfn < kvm->mmu_invalidate_range_end) | |
2055 | return 1; | |
2056 | } | |
2057 | ||
20ec3ebd | 2058 | if (kvm->mmu_invalidate_seq != mmu_seq) |
4a42d848 DS |
2059 | return 1; |
2060 | return 0; | |
2061 | } | |
d02c357e SC |
2062 | |
2063 | /* | |
2064 | * This lockless version of the range-based retry check *must* be paired with a | |
2065 | * call to the locked version after acquiring mmu_lock, i.e. this is safe to | |
2066 | * use only as a pre-check to avoid contending mmu_lock. This version *will* | |
2067 | * get false negatives and false positives. | |
2068 | */ | |
2069 | static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm, | |
2070 | unsigned long mmu_seq, | |
2071 | gfn_t gfn) | |
2072 | { | |
2073 | /* | |
2074 | * Use READ_ONCE() to ensure the in-progress flag and sequence counter | |
2075 | * are always read from memory, e.g. so that checking for retry in a | |
2076 | * loop won't result in an infinite retry loop. Don't force loads for | |
2077 | * start+end, as the key to avoiding infinite retry loops is observing | |
2078 | * the 1=>0 transition of in-progress, i.e. getting false negatives | |
2079 | * due to stale start+end values is acceptable. | |
2080 | */ | |
2081 | if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) && | |
2082 | gfn >= kvm->mmu_invalidate_range_start && | |
2083 | gfn < kvm->mmu_invalidate_range_end) | |
2084 | return true; | |
2085 | ||
2086 | return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq; | |
2087 | } | |
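/*
 * Sketch of the required pairing (the variable names are assumptions,
 * hence the #if 0 guard): the lockless check only elides mmu_lock on the
 * fast path; the locked check remains authoritative.
 */
#if 0
	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, gfn))
		return -EAGAIN;			/* skip contending mmu_lock */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		write_unlock(&kvm->mmu_lock);
		return -EAGAIN;			/* the pre-check raced */
	}
	/* ... */
#endif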
e930bffe AA |
2088 | #endif |
2089 | ||
a725d56a | 2090 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
399ec807 | 2091 | |
ddc9cfb7 | 2092 | #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ |
399ec807 | 2093 | |
5c0aea0e | 2094 | bool kvm_arch_can_set_irq_routing(struct kvm *kvm); |
399ec807 AK |
2095 | int kvm_set_irq_routing(struct kvm *kvm, |
2096 | const struct kvm_irq_routing_entry *entries, | |
2097 | unsigned nr, | |
2098 | unsigned flags); | |
c63cf538 RK |
2099 | int kvm_set_routing_entry(struct kvm *kvm, |
2100 | struct kvm_kernel_irq_routing_entry *e, | |
e8cde093 | 2101 | const struct kvm_irq_routing_entry *ue); |
399ec807 AK |
2102 | void kvm_free_irq_routing(struct kvm *kvm); |
2103 | ||
2104 | #else | |
2105 | ||
2106 | static inline void kvm_free_irq_routing(struct kvm *kvm) {} | |
2107 | ||
2108 | #endif | |
2109 | ||
297e2105 PM |
2110 | int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); |
2111 | ||
d34e6b17 | 2112 | void kvm_eventfd_init(struct kvm *kvm); |
914daba8 AG |
2113 | int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); |
2114 | ||
c5b31cc2 | 2115 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
d4db2935 | 2116 | int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); |
721eecbf | 2117 | void kvm_irqfd_release(struct kvm *kvm); |
fef8f2b9 DM |
2118 | bool kvm_notify_irqfd_resampler(struct kvm *kvm, |
2119 | unsigned int irqchip, | |
2120 | unsigned int pin); | |
9957c86d | 2121 | void kvm_irq_routing_update(struct kvm *); |
914daba8 AG |
2122 | #else |
2123 | static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) | |
2124 | { | |
2125 | return -EINVAL; | |
2126 | } | |
2127 | ||
2128 | static inline void kvm_irqfd_release(struct kvm *kvm) {} | |
fef8f2b9 DM |
2129 | |
2130 | static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm, | |
2131 | unsigned int irqchip, | |
2132 | unsigned int pin) | |
2133 | { | |
2134 | return false; | |
2135 | } | |
c5b31cc2 | 2136 | #endif /* CONFIG_HAVE_KVM_IRQCHIP */ |
721eecbf | 2137 | |
07646749 SO |
2138 | void kvm_arch_irq_routing_update(struct kvm *kvm); |
2139 | ||
df06dae3 | 2140 | static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu) |
a8eeb04a | 2141 | { |
2e4682ba PB |
2142 | /* |
2143 | * Ensure the rest of the request is published to kvm_check_request's | |
2144 | * caller. Paired with the smp_mb__after_atomic in kvm_check_request. | |
2145 | */ | |
2146 | smp_wmb(); | |
86dafed5 | 2147 | set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); |
a8eeb04a AK |
2148 | } |
2149 | ||
df06dae3 SC |
2150 | static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) |
2151 | { | |
2152 | /* | |
2153 | * Requests that don't require vCPU action should never be logged in |
2154 | * vcpu->requests. The vCPU won't clear the request, so it will stay | |
2155 | * logged indefinitely and prevent the vCPU from entering the guest. | |
2156 | */ | |
2157 | BUILD_BUG_ON(!__builtin_constant_p(req) || | |
2158 | (req & KVM_REQUEST_NO_ACTION)); | |
2159 | ||
2160 | __kvm_make_request(req, vcpu); | |
2161 | } | |
2162 | ||
2fa6e1e1 RK |
2163 | static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) |
2164 | { | |
2165 | return READ_ONCE(vcpu->requests); | |
2166 | } | |
2167 | ||
72875d8a RK |
2168 | static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) |
2169 | { | |
86dafed5 | 2170 | return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); |
72875d8a RK |
2171 | } |
2172 | ||
2173 | static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu) | |
2174 | { | |
86dafed5 | 2175 | clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); |
72875d8a RK |
2176 | } |
2177 | ||
a8eeb04a AK |
2178 | static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) |
2179 | { | |
72875d8a RK |
2180 | if (kvm_test_request(req, vcpu)) { |
2181 | kvm_clear_request(req, vcpu); | |
2e4682ba PB |
2182 | |
2183 | /* | |
2184 | * Ensure the rest of the request is visible to kvm_check_request's | |
2185 | * caller. Paired with the smp_wmb in kvm_make_request. | |
2186 | */ | |
2187 | smp_mb__after_atomic(); | |
0719837c AK |
2188 | return true; |
2189 | } else { | |
2190 | return false; | |
2191 | } | |
a8eeb04a AK |
2192 | } |
2193 | ||
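/*
 * The producer/consumer protocol for vCPU requests, as a sketch (the
 * helper names are assumptions; KVM_REQ_UNBLOCK is a real generic
 * request).  The smp_wmb()/smp_mb__after_atomic() pair above guarantees
 * that payload data written before kvm_make_request() is visible to a
 * consumer that observes the request via kvm_check_request().
 */
static inline void post_request_sketch(struct kvm_vcpu *vcpu)
{
	/* Write any payload first; kvm_make_request() publishes it. */
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);	/* force the vCPU out of guest mode */
}

static inline void service_requests_sketch(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;

	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) {
		/* ... consume the payload before reentering the guest ... */
	}
}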
441f7bfa | 2194 | #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING |
8b415dcd | 2195 | extern bool kvm_rebooting; |
441f7bfa | 2196 | #endif |
8b415dcd | 2197 | |
ec76d819 SJS |
2198 | extern unsigned int halt_poll_ns; |
2199 | extern unsigned int halt_poll_ns_grow; | |
49113d36 | 2200 | extern unsigned int halt_poll_ns_grow_start; |
ec76d819 SJS |
2201 | extern unsigned int halt_poll_ns_shrink; |
2202 | ||
852b6d57 | 2203 | struct kvm_device { |
8538cb22 | 2204 | const struct kvm_device_ops *ops; |
852b6d57 | 2205 | struct kvm *kvm; |
852b6d57 | 2206 | void *private; |
07f0a7bd | 2207 | struct list_head vm_node; |
852b6d57 SW |
2208 | }; |
2209 | ||
2210 | /* create, destroy, and name are mandatory */ | |
2211 | struct kvm_device_ops { | |
2212 | const char *name; | |
a28ebea2 CD |
2213 | |
2214 | /* | |
2215 | * create is called holding kvm->lock; any operations not suitable |
2216 | * to do while holding the lock should be deferred to init (see | |
2217 | * below). | |
2218 | */ | |
852b6d57 SW |
2219 | int (*create)(struct kvm_device *dev, u32 type); |
2220 | ||
023e9fdd CD |
2221 | /* |
2222 | * init is called after create, if create was successful, and is |
2223 | * called without kvm->lock held. |
2224 | */ | |
2225 | void (*init)(struct kvm_device *dev); | |
2226 | ||
852b6d57 SW |
2227 | /* |
2228 | * Destroy is responsible for freeing dev. | |
2229 | * | |
2230 | * Destroy may be called before or after destructors are called | |
2231 | * on emulated I/O regions, depending on whether a reference is | |
2232 | * held by a vcpu or other kvm component that gets destroyed | |
2233 | * after the emulated I/O. | |
2234 | */ | |
2235 | void (*destroy)(struct kvm_device *dev); | |
2236 | ||
2bde9b3e CLG |
2237 | /* |
2238 | * Release is an alternative method to free the device. It is | |
2239 | * called when the device file descriptor is closed. Once | |
2240 | * release is called, the destroy method will not be called | |
2241 | * anymore as the device is removed from the device list of | |
2242 | * the VM. kvm->lock is held. | |
2243 | */ | |
2244 | void (*release)(struct kvm_device *dev); | |
2245 | ||
852b6d57 SW |
2246 | int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); |
2247 | int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); | |
2248 | int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); | |
2249 | long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, | |
2250 | unsigned long arg); | |
a1cd3f08 | 2251 | int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma); |
852b6d57 SW |
2252 | }; |
2253 | ||
852b6d57 | 2254 | struct kvm_device *kvm_device_from_filp(struct file *filp); |
8538cb22 | 2255 | int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type); |
571ee1b6 | 2256 | void kvm_unregister_device_ops(u32 type); |
852b6d57 | 2257 | |
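/*
 * A skeleton device following the kvm_device_ops lifecycle documented
 * above (everything here is illustrative, hence the #if 0 guard: the ops
 * names, the private state, and the registered type are assumptions).
 */
#if 0
static int sketch_dev_create(struct kvm_device *dev, u32 type)
{
	/* kvm->lock is held; defer anything unsuitable under it to init. */
	dev->private = kzalloc(sizeof(struct sketch_state), GFP_KERNEL_ACCOUNT);
	return dev->private ? 0 : -ENOMEM;
}

static void sketch_dev_init(struct kvm_device *dev)
{
	/* Runs after a successful create, without kvm->lock held. */
}

static void sketch_dev_destroy(struct kvm_device *dev)
{
	kfree(dev->private);
	kfree(dev);	/* destroy must free dev itself */
}

static struct kvm_device_ops sketch_dev_ops = {
	.name = "sketch",
	.create = sketch_dev_create,
	.init = sketch_dev_init,
	.destroy = sketch_dev_destroy,
};
/* kvm_register_device_ops(&sketch_dev_ops, <a KVM_DEV_TYPE_* value>); */
#endif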
5df554ad | 2258 | extern struct kvm_device_ops kvm_mpic_ops; |
ea2f83a7 | 2259 | extern struct kvm_device_ops kvm_arm_vgic_v2_ops; |
a0675c25 | 2260 | extern struct kvm_device_ops kvm_arm_vgic_v3_ops; |
5df554ad | 2261 | |
4c088493 R |
2262 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
2263 | ||
2264 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) | |
2265 | { | |
2266 | vcpu->spin_loop.in_spin_loop = val; | |
2267 | } | |
2268 | static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) | |
2269 | { | |
2270 | vcpu->spin_loop.dy_eligible = val; | |
2271 | } | |
2272 | ||
2273 | #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ | |
2274 | ||
2275 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) | |
2276 | { | |
2277 | } | |
2278 | ||
2279 | static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) | |
2280 | { | |
2281 | } | |
4c088493 | 2282 | #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
1a02b270 | 2283 | |
c36b7150 PB |
2284 | static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot) |
2285 | { | |
2286 | return (memslot && memslot->id < KVM_USER_MEM_SLOTS && | |
2287 | !(memslot->flags & KVM_MEMSLOT_INVALID)); | |
2288 | } | |
2289 | ||
7495e22b | 2290 | struct kvm_vcpu *kvm_get_running_vcpu(void); |
fcd07f9a | 2291 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); |
7495e22b | 2292 | |
1a02b270 | 2293 | #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS |
14717e20 | 2294 | bool kvm_arch_has_irq_bypass(void); |
1a02b270 EA |
2295 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, |
2296 | struct irq_bypass_producer *); | |
2297 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, | |
2298 | struct irq_bypass_producer *); | |
2299 | void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); | |
2300 | void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); | |
f70c20aa FW |
2301 | int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, |
2302 | uint32_t guest_irq, bool set); | |
515a0c79 LM |
2303 | bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, |
2304 | struct kvm_kernel_irq_routing_entry *); | |
1a02b270 | 2305 | #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ |
35181e86 | 2306 | |
3491caf2 CB |
2307 | #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS |
2308 | /* If we wake up during the poll time, was it a successful poll? */ |
2309 | static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) | |
2310 | { | |
2311 | return vcpu->valid_wakeup; | |
2312 | } | |
2313 | ||
2314 | #else | |
2315 | static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) | |
2316 | { | |
2317 | return true; | |
2318 | } | |
2319 | #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ | |
2320 | ||
cdd6ad3a CB |
2321 | #ifdef CONFIG_HAVE_KVM_NO_POLL |
2322 | /* Callback that tells if we must not poll */ | |
2323 | bool kvm_arch_no_poll(struct kvm_vcpu *vcpu); | |
2324 | #else | |
2325 | static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) | |
2326 | { | |
2327 | return false; | |
2328 | } | |
2329 | #endif /* CONFIG_HAVE_KVM_NO_POLL */ | |
2330 | ||
5cb0944c PB |
2331 | #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL |
2332 | long kvm_arch_vcpu_async_ioctl(struct file *filp, | |
2333 | unsigned int ioctl, unsigned long arg); | |
2334 | #else | |
2335 | static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, | |
2336 | unsigned int ioctl, | |
2337 | unsigned long arg) | |
2338 | { | |
2339 | return -ENOIOCTLCMD; | |
2340 | } | |
2341 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ | |
2342 | ||
683412cc MZ |
2343 | void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); |
2344 | ||
bd2a6394 CD |
2345 | #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE |
2346 | int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); | |
2347 | #else | |
2348 | static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) | |
2349 | { | |
2350 | return 0; | |
2351 | } | |
2352 | #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ | |
2353 | ||
c57c8046 JS |
2354 | typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); |
2355 | ||
2356 | int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, | |
2357 | uintptr_t data, const char *name, | |
2358 | struct task_struct **thread_ptr); | |
2359 | ||
935ace2f TG |
2360 | #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK |
2361 | static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) | |
2362 | { | |
2363 | vcpu->run->exit_reason = KVM_EXIT_INTR; | |
2364 | vcpu->stat.signal_exits++; | |
2365 | } | |
2366 | #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ | |
2367 | ||
43a063ca YA |
2368 | /* |
2369 | * If more than one page is being (un)accounted, @virt must be the address of | |
2370 | * the first page of a block of pages that were allocated together (i.e. |
2371 | * accounted together). |
2372 | * | |
2373 | * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state() | |
2374 | * is thread-safe. | |
2375 | */ | |
2376 | static inline void kvm_account_pgtable_pages(void *virt, int nr) | |
2377 | { | |
2378 | mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr); | |
2379 | } | |
2380 | ||
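/*
 * Example (illustrative; "sp->spt" mirrors how x86 names its shadow page
 * payload and is an assumption here, hence the #if 0 guard): account one
 * page-table page on allocation and unaccount it on free.
 */
#if 0
	kvm_account_pgtable_pages(sp->spt, +1);		/* on allocation */
	kvm_account_pgtable_pages(sp->spt, -1);		/* on free */
#endif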
fb04a1ed PX |
2381 | /* |
2382 | * This defines how many reserved entries we want to keep before we | |
2383 | * kick the vcpu to userspace to avoid a full dirty ring. This |
2384 | * value can be tuned higher if e.g. PML is enabled on the host. |
2385 | */ | |
2386 | #define KVM_DIRTY_RING_RSVD_ENTRIES 64 | |
2387 | ||
2388 | /* Max number of entries allowed for each kvm dirty ring */ | |
2389 | #define KVM_DIRTY_RING_MAX_ENTRIES 65536 | |
2390 | ||
16f95f3b | 2391 | static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, |
8dd2eee9 CP |
2392 | gpa_t gpa, gpa_t size, |
2393 | bool is_write, bool is_exec, | |
2394 | bool is_private) | |
16f95f3b CP |
2395 | { |
2396 | vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT; | |
2397 | vcpu->run->memory_fault.gpa = gpa; | |
2398 | vcpu->run->memory_fault.size = size; | |
2399 | ||
8dd2eee9 | 2400 | /* RWX flags are not (yet) defined or communicated to userspace. */ |
16f95f3b | 2401 | vcpu->run->memory_fault.flags = 0; |
8dd2eee9 CP |
2402 | if (is_private) |
2403 | vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE; | |
16f95f3b CP |
2404 | } |
2405 | ||
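/*
 * A sketch of the intended call site (the surrounding fault-handler
 * variables are assumptions, hence the #if 0 guard): when a fault's
 * private/shared state disagrees with the gfn's attributes, exit to
 * userspace with KVM_EXIT_MEMORY_FAULT.  kvm_mem_is_private() is defined
 * later in this header.
 */
#if 0
	if (fault_is_private != kvm_mem_is_private(kvm, gfn)) {
		kvm_prepare_memory_fault_exit(vcpu, gfn_to_gpa(gfn), PAGE_SIZE,
					      is_write, is_exec,
					      fault_is_private);
		return -EFAULT;		/* surfaces the exit to userspace */
	}
#endif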
5a475554 CP |
2406 | #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES |
2407 | static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn) | |
2408 | { | |
2409 | return xa_to_value(xa_load(&kvm->mem_attr_array, gfn)); | |
2410 | } | |
2411 | ||
2412 | bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, | |
2413 | unsigned long attrs); | |
2414 | bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, | |
2415 | struct kvm_gfn_range *range); | |
2416 | bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, | |
2417 | struct kvm_gfn_range *range); | |
a7800aa8 SC |
2418 | |
2419 | static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) | |
2420 | { | |
2421 | return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) && | |
2422 | kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE; | |
2423 | } | |
2424 | #else | |
2425 | static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) | |
2426 | { | |
2427 | return false; | |
2428 | } | |
5a475554 CP |
2429 | #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */ |
2430 | ||
a7800aa8 SC |
2431 | #ifdef CONFIG_KVM_PRIVATE_MEM |
2432 | int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, | |
2433 | gfn_t gfn, kvm_pfn_t *pfn, int *max_order); | |
2434 | #else | |
2435 | static inline int kvm_gmem_get_pfn(struct kvm *kvm, | |
2436 | struct kvm_memory_slot *slot, gfn_t gfn, | |
2437 | kvm_pfn_t *pfn, int *max_order) | |
2438 | { | |
2439 | KVM_BUG_ON(1, kvm); | |
2440 | return -EIO; | |
2441 | } | |
2442 | #endif /* CONFIG_KVM_PRIVATE_MEM */ | |
2443 | ||
bfd99ff5 | 2444 | #endif |