/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

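/*
 * Illustrative sketch (not part of this header): how a generation-tagged
 * cache entry is invalidated by the scheme described above.  The cache
 * structure and helper below are hypothetical (struct kvm_memslots is
 * defined further down in this file); KVM's MMIO caches apply the same
 * two checks.
 */
struct example_mmio_cache {
	u64 generation;		/* memslots generation at fill time */
	gpa_t gpa;
};

static inline bool example_cache_is_stale(struct kvm_memslots *slots,
					  struct example_mmio_cache *c)
{
	/* Never match while install_new_memslots() is in progress... */
	if (slots->generation & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
		return true;
	/* ...and never match entries filled under an older generation. */
	return c->generation != slots->generation;
}
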
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62..52 to indicate an error pfn,
 * and mask bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but translation to a
 * host pfn failed.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn can not be translated to a pfn:
 * either the gfn is not in any slot, or the translation to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures whose KVM_HVA_ERR_BAD is other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK           GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP      BIT(8)
#define KVM_REQUEST_WAIT           BIT(9)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER     2
#define KVM_REQ_UNHALT            3
#define KVM_REQUEST_ARCH_BASE     8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)

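/*
 * Illustrative sketch (not part of this header): architectures define their
 * own requests on top of KVM_REQUEST_ARCH_BASE.  The request names below are
 * hypothetical; x86, for instance, builds its requests exactly this way.
 */
#define KVM_REQ_EXAMPLE_EVENT	KVM_ARCH_REQ(0)
#define KVM_REQ_EXAMPLE_FLUSH	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
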
#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

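/*
 * Illustrative sketch (not part of this header): registering a device on the
 * MMIO bus.  The wrapper and its parameters are hypothetical; real callers
 * (e.g. the coalesced MMIO code) hold kvm->slots_lock around registration.
 */
static inline int example_register_mmio_dev(struct kvm *kvm,
					    struct kvm_io_device *dev,
					    gpa_t addr, int len)
{
	/* Fails with -ENOSPC once NR_IOBUS_DEVS devices are registered. */
	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
}
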
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
		       unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there
	 * is a 'struct page' for it).  When the mem= kernel parameter is
	 * used, some memory can serve as guest memory without being managed
	 * by the host kernel.
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not.  Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

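/*
 * Illustrative sketch (not part of this header): writing one byte to guest
 * memory through a kvm_host_map.  The wrapper is hypothetical;
 * kvm_vcpu_map()/kvm_vcpu_unmap() are declared further down in this file.
 */
static inline int example_write_guest_byte(struct kvm_vcpu *vcpu, gpa_t gpa,
					   u8 val)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa, &map))
		return -EFAULT;

	*((u8 *)map.hva + offset_in_page(gpa)) = val;

	/* 'true' marks the page dirty so dirty logging sees the write. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}
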
/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index in kvm->vcpus array */
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	struct swait_queue_head wq;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization.
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or has its cpu relax intercepted.
	 * dy_eligible: indicates whether the vcpu is eligible for
	 *  directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct dentry *debugfs_dentry;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
	bool manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,		\
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

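/*
 * Illustrative sketch (not part of this header): iterating over all online
 * vcpus.  Kicking each one like this mirrors what
 * kvm_make_all_cpus_request() does internally; kvm_vcpu_kick() is declared
 * further down in this file.
 */
static inline void example_kick_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}
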
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
			lockdep_is_held(&kvm->slots_lock) ||
			!refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

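/*
 * Illustrative sketch (not part of this header): how a request is classified
 * into a kvm_mr_change, given the old slot and the new parameters.  This is
 * a simplified, hypothetical condensation of the checks done by
 * __kvm_set_memory_region(); the real code also validates flags and overlap.
 */
static inline enum kvm_mr_change
example_classify_change(const struct kvm_memory_slot *old,
			gfn_t new_base_gfn, unsigned long new_npages)
{
	if (!old->npages)
		return KVM_MR_CREATE;
	if (!new_npages)
		return KVM_MR_DELETE;
	if (new_base_gfn != old->base_gfn)
		return KVM_MR_MOVE;
	return KVM_MR_FLAGS_ONLY;
}
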
int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

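/*
 * Illustrative sketch (not part of this header): translating a gfn to a host
 * pfn and telling the failure modes apart with the predicates defined near
 * the top of this file.  The wrapper is hypothetical.
 */
static inline int example_probe_gfn(struct kvm *kvm, gfn_t gfn)
{
	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_noslot_pfn(pfn))
		return -ENOENT;	/* gfn is not backed by any memslot */
	if (is_error_pfn(pfn))
		return -EFAULT;	/* in a slot, but translation failed */

	kvm_release_pfn_clean(pfn);	/* drop the reference gfn_to_pfn took */
	return 0;
}
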
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_put_guest(kvm, gfn, offset, value, type)			\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	type __user *__uaddr = (type __user *)(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(value, __uaddr);			\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, value, type)				\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), (value), type);		\
})

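/*
 * Illustrative sketch (not part of this header): storing one value at a
 * guest physical address via kvm_put_guest().  The wrapper is hypothetical;
 * kvm_put_guest() splits the gpa into gfn plus page offset and marks the
 * page dirty on success.
 */
static inline int example_put_u32(struct kvm *kvm, gpa_t gpa, u32 val)
{
	return kvm_put_guest(kvm, gpa, val, u32);
}
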
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *flush);
int kvm_clear_dirty_log_protect(struct kvm *kvm,
				struct kvm_clear_dirty_log *log, bool *flush);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset,
					unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);
int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
				  struct kvm_clear_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			       struct kvm *kvm, int irq_source_id,
			       int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	/* Fast path: the last-used (lru) slot often matches. */
	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	/* Binary search; memslots[] is sorted by base_gfn, descending. */
	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	struct kvm_stats_debugfs_item *dbgfs_item;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	int mode;
};

#define KVM_DBGFS_GET_MODE(dbgfs_item)					\
	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)

extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

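/*
 * Illustrative sketch (not part of this header, and assuming
 * KVM_ARCH_WANT_MMU_NOTIFIER): the canonical retry pattern used by page
 * fault handlers.  The fault handling itself is elided; the point is
 * sampling mmu_notifier_seq before the sleepable translation and
 * re-checking under mmu_lock before installing the mapping.
 */
static inline int example_fault_path(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq = kvm->mmu_notifier_seq;
	kvm_pfn_t pfn;

	smp_rmb();	/* pairs with the barriers in the notifier callbacks */

	pfn = gfn_to_pfn(kvm, gfn);	/* may sleep */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* An invalidation raced with us: drop the pfn and retry. */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;
	}
	/* ... install the gfn -> pfn translation here ... */
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
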
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to kvm_check_request's
		 * caller.  Paired with the smp_wmb in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}

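/*
 * Illustrative sketch (not part of this header): the request API pairs a
 * producer (any context) with a consumer (the vcpu's run loop).  The helpers
 * are hypothetical; KVM_REQ_UNHALT stands in for any request bit.
 */
static inline void example_post_request(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_UNHALT, vcpu);
	kvm_vcpu_kick(vcpu);	/* force an exit so the request is noticed */
}

static inline void example_consume_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;
	if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
		/* ... handle the unhalt here ... */
	}
}
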
extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device. It is
	 * called when the device file descriptor is closed. Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM. kvm->lock is held.
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

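/*
 * Illustrative sketch (not part of this header): a minimal kvm_device_ops
 * instance.  The names are hypothetical; only create, destroy and name are
 * mandatory, per the comment above.
 */
static int example_dev_create(struct kvm_device *dev, u32 type)
{
	dev->private = NULL;	/* heavy setup belongs in ->init() */
	return 0;
}

static void example_dev_destroy(struct kvm_device *dev)
{
	kfree(dev);		/* destroy must free dev itself */
}

static struct kvm_device_ops example_device_ops = {
	.name = "example",
	.create = example_dev_create,
	.destroy = example_dev_destroy,
};
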
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

1372 | ||
cdd6ad3a CB |
1373 | #ifdef CONFIG_HAVE_KVM_NO_POLL |
1374 | /* Callback that tells if we must not poll */ | |
1375 | bool kvm_arch_no_poll(struct kvm_vcpu *vcpu); | |
1376 | #else | |
1377 | static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) | |
1378 | { | |
1379 | return false; | |
1380 | } | |
1381 | #endif /* CONFIG_HAVE_KVM_NO_POLL */ | |
1382 | ||
#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end, bool blockable);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);

#endif