Commit | Line | Data |
---|---|---|
d94d71cb | 1 | // SPDX-License-Identifier: GPL-2.0-only |
749cf76c CD |
2 | /* |
3 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
4 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
749cf76c | 5 | */ |
342cd0ab CD |
6 | |
7 | #include <linux/mman.h> | |
8 | #include <linux/kvm_host.h> | |
9 | #include <linux/io.h> | |
ad361f09 | 10 | #include <linux/hugetlb.h> |
196f878a | 11 | #include <linux/sched/signal.h> |
45e96ea6 | 12 | #include <trace/events/kvm.h> |
342cd0ab | 13 | #include <asm/pgalloc.h> |
94f8e641 | 14 | #include <asm/cacheflush.h> |
342cd0ab CD |
15 | #include <asm/kvm_arm.h> |
16 | #include <asm/kvm_mmu.h> | |
0f9d09b8 | 17 | #include <asm/kvm_pgtable.h> |
0db5e022 | 18 | #include <asm/kvm_ras.h> |
d5d8184d | 19 | #include <asm/kvm_asm.h> |
94f8e641 | 20 | #include <asm/kvm_emulate.h> |
1e947bad | 21 | #include <asm/virt.h> |
d5d8184d CD |
22 | |
23 | #include "trace.h" | |
342cd0ab | 24 | |
0f9d09b8 | 25 | static struct kvm_pgtable *hyp_pgtable; |
342cd0ab CD |
26 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
27 | ||
5a677ce0 MZ |
28 | static unsigned long hyp_idmap_start; |
29 | static unsigned long hyp_idmap_end; | |
30 | static phys_addr_t hyp_idmap_vector; | |
31 | ||
e3f019b3 MZ |
32 | static unsigned long io_map_base; |
33 | ||
6d674e28 | 34 | |
52bae936 WD |
35 | /* |
36 | * Release kvm_mmu_lock periodically if the memory region is large. Otherwise, | |
37 | * we may see kernel panics with CONFIG_DETECT_HUNG_TASK, | |
38 | * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too | |
39 | long will also starve other vCPUs. We also have to make sure that the page |
40 | tables are not freed while the lock is released. |
41 | */ | |
42 | static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr, | |
43 | phys_addr_t end, | |
44 | int (*fn)(struct kvm_pgtable *, u64, u64), | |
45 | bool resched) | |
46 | { | |
47 | int ret; | |
48 | u64 next; | |
49 | ||
50 | do { | |
51 | struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; | |
52 | if (!pgt) | |
53 | return -EINVAL; | |
54 | ||
55 | next = stage2_pgd_addr_end(kvm, addr, end); | |
56 | ret = fn(pgt, addr, next - addr); | |
57 | if (ret) | |
58 | break; | |
59 | ||
60 | if (resched && next != end) | |
61 | cond_resched_lock(&kvm->mmu_lock); | |
62 | } while (addr = next, addr != end); | |
63 | ||
64 | return ret; | |
65 | } | |
66 | ||
cc38d61c QP |
67 | #define stage2_apply_range_resched(kvm, addr, end, fn) \ |
68 | stage2_apply_range(kvm, addr, end, fn, true) | |
69 | ||
15a49a44 MS |
70 | static bool memslot_is_logging(struct kvm_memory_slot *memslot) |
71 | { | |
15a49a44 | 72 | return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); |
7276030a MS |
73 | } |
74 | ||
75 | /** | |
76 | * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8 | |
77 | * @kvm: pointer to kvm structure. | |
78 | * | |
79 | * Interface to HYP function to flush all VM TLB entries | |
80 | */ | |
81 | void kvm_flush_remote_tlbs(struct kvm *kvm) | |
82 | { | |
a0e50aa3 | 83 | kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); |
15a49a44 | 84 | } |
ad361f09 | 85 | |
e6fab544 AB |
86 | static bool kvm_is_device_pfn(unsigned long pfn) |
87 | { | |
88 | return !pfn_valid(pfn); | |
89 | } | |
90 | ||
7aef0cbc QP |
91 | static void *stage2_memcache_zalloc_page(void *arg) |
92 | { | |
93 | struct kvm_mmu_memory_cache *mc = arg; | |
94 | ||
95 | /* Allocated with __GFP_ZERO, so no need to zero */ | |
96 | return kvm_mmu_memory_cache_alloc(mc); | |
97 | } | |
98 | ||
99 | static void *kvm_host_zalloc_pages_exact(size_t size) | |
100 | { | |
101 | return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); | |
102 | } | |
103 | ||
104 | static void kvm_host_get_page(void *addr) | |
105 | { | |
106 | get_page(virt_to_page(addr)); | |
107 | } | |
108 | ||
109 | static void kvm_host_put_page(void *addr) | |
110 | { | |
111 | put_page(virt_to_page(addr)); | |
112 | } | |
113 | ||
114 | static int kvm_host_page_count(void *addr) | |
115 | { | |
116 | return page_count(virt_to_page(addr)); | |
117 | } | |
118 | ||
119 | static phys_addr_t kvm_host_pa(void *addr) | |
120 | { | |
121 | return __pa(addr); | |
122 | } | |
123 | ||
124 | static void *kvm_host_va(phys_addr_t phys) | |
125 | { | |
126 | return __va(phys); | |
127 | } | |
128 | ||
363ef89f MZ |
129 | /* |
130 | * Unmapping vs dcache management: | |
131 | * | |
132 | * If a guest maps certain memory pages as uncached, all writes will | |
133 | * bypass the data cache and go directly to RAM. However, the CPUs | |
134 | * can still speculate reads (not writes) and fill cache lines with | |
135 | * data. | |
136 | * | |
137 | * Those cache lines will be *clean* cache lines though, so a | |
138 | * clean+invalidate operation is equivalent to an invalidate | |
139 | * operation, because no cache lines are marked dirty. | |
140 | * | |
141 | * Those clean cache lines could be filled prior to an uncached write | |
142 | * by the guest, and the cache coherent IO subsystem would therefore | |
143 | * end up writing old data to disk. | |
144 | * | |
145 | * This is why right after unmapping a page/section and invalidating | |
52bae936 WD |
146 | * the corresponding TLBs, we flush to make sure the IO subsystem will |
147 | * never hit in the cache. | |
e48d53a9 MZ |
148 | * |
149 | * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as | |
150 | * we then fully enforce cacheability of RAM, no matter what the guest | |
151 | * does. | |
363ef89f | 152 | */ |
7a1c831e SP |
153 | /** |
154 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range | |
c9c0279c | 155 | * @mmu: The KVM stage-2 MMU pointer |
7a1c831e SP |
156 | * @start: The intermediate physical base address of the range to unmap |
157 | * @size: The size of the area to unmap | |
c9c0279c | 158 | * @may_block: Whether or not we are permitted to block |
7a1c831e SP |
159 | * |
160 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must | |
161 | * be called while holding mmu_lock (unless for freeing the stage2 pgd before | |
162 | * destroying the VM), otherwise another faulting VCPU may come in and mess | |
163 | * with things behind our backs. | |
164 | */ | |
b5331379 WD |
165 | static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, |
166 | bool may_block) | |
4f853a71 | 167 | { |
cfb1a98d | 168 | struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); |
52bae936 | 169 | phys_addr_t end = start + size; |
4f853a71 | 170 | |
8b3405e3 | 171 | assert_spin_locked(&kvm->mmu_lock); |
47a91b72 | 172 | WARN_ON(size & ~PAGE_MASK); |
52bae936 WD |
173 | WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap, |
174 | may_block)); | |
000d3996 MZ |
175 | } |
176 | ||
b5331379 WD |
177 | static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) |
178 | { | |
179 | __unmap_stage2_range(mmu, start, size, true); | |
180 | } | |
181 | ||
9d218a1f MZ |
182 | static void stage2_flush_memslot(struct kvm *kvm, |
183 | struct kvm_memory_slot *memslot) | |
184 | { | |
185 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
186 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; | |
9d218a1f | 187 | |
8d5207be | 188 | stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush); |
9d218a1f MZ |
189 | } |
190 | ||
191 | /** | |
192 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 | |
193 | * @kvm: The struct kvm pointer | |
194 | * | |
195 | * Go through the stage 2 page tables and invalidate any cache lines | |
196 | * backing memory already mapped to the VM. | |
197 | */ | |
3c1e7165 | 198 | static void stage2_flush_vm(struct kvm *kvm) |
9d218a1f MZ |
199 | { |
200 | struct kvm_memslots *slots; | |
201 | struct kvm_memory_slot *memslot; | |
202 | int idx; | |
203 | ||
204 | idx = srcu_read_lock(&kvm->srcu); | |
205 | spin_lock(&kvm->mmu_lock); | |
206 | ||
207 | slots = kvm_memslots(kvm); | |
208 | kvm_for_each_memslot(memslot, slots) | |
209 | stage2_flush_memslot(kvm, memslot); | |
210 | ||
211 | spin_unlock(&kvm->mmu_lock); | |
212 | srcu_read_unlock(&kvm->srcu, idx); | |
213 | } | |
214 | ||
342cd0ab | 215 | /** |
4f728276 | 216 | * free_hyp_pgds - free Hyp-mode page tables |
342cd0ab | 217 | */ |
4f728276 | 218 | void free_hyp_pgds(void) |
342cd0ab | 219 | { |
d157f4a5 | 220 | mutex_lock(&kvm_hyp_pgd_mutex); |
0f9d09b8 WD |
221 | if (hyp_pgtable) { |
222 | kvm_pgtable_hyp_destroy(hyp_pgtable); | |
223 | kfree(hyp_pgtable); | |
bfa79a80 | 224 | hyp_pgtable = NULL; |
26781f9c | 225 | } |
342cd0ab CD |
226 | mutex_unlock(&kvm_hyp_pgd_mutex); |
227 | } | |
228 | ||
bfa79a80 QP |
229 | static bool kvm_host_owns_hyp_mappings(void) |
230 | { | |
231 | if (static_branch_likely(&kvm_protected_mode_initialized)) | |
232 | return false; | |
233 | ||
234 | /* | |
235 | * This can happen at boot time when __create_hyp_mappings() is called | |
236 | * after the hyp protection has been enabled, but the static key has | |
237 | * not been flipped yet. | |
238 | */ | |
239 | if (!hyp_pgtable && is_protected_kvm_enabled()) | |
240 | return false; | |
241 | ||
242 | WARN_ON(!hyp_pgtable); | |
243 | ||
244 | return true; | |
245 | } | |
246 | ||
0f9d09b8 WD |
247 | static int __create_hyp_mappings(unsigned long start, unsigned long size, |
248 | unsigned long phys, enum kvm_pgtable_prot prot) | |
342cd0ab | 249 | { |
0f9d09b8 | 250 | int err; |
342cd0ab | 251 | |
bfa79a80 QP |
252 | if (!kvm_host_owns_hyp_mappings()) { |
253 | return kvm_call_hyp_nvhe(__pkvm_create_mappings, | |
254 | start, size, phys, prot); | |
255 | } | |
256 | ||
342cd0ab | 257 | mutex_lock(&kvm_hyp_pgd_mutex); |
0f9d09b8 | 258 | err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot); |
342cd0ab | 259 | mutex_unlock(&kvm_hyp_pgd_mutex); |
0f9d09b8 | 260 | |
342cd0ab CD |
261 | return err; |
262 | } | |
263 | ||
40c2729b CD |
264 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) |
265 | { | |
266 | if (!is_vmalloc_addr(kaddr)) { | |
267 | BUG_ON(!virt_addr_valid(kaddr)); | |
268 | return __pa(kaddr); | |
269 | } else { | |
270 | return page_to_phys(vmalloc_to_page(kaddr)) + | |
271 | offset_in_page(kaddr); | |
272 | } | |
273 | } | |
274 | ||
342cd0ab | 275 | /** |
06e8c3b0 | 276 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
342cd0ab CD |
277 | * @from: The virtual kernel start address of the range |
278 | * @to: The virtual kernel end address of the range (exclusive) | |
c8dddecd | 279 | * @prot: The protection to be applied to this range |
342cd0ab | 280 | * |
06e8c3b0 MZ |
281 | * The same virtual address as the kernel virtual address is also used |
282 | * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying | |
283 | * physical pages. | |
342cd0ab | 284 | */ |
0f9d09b8 | 285 | int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot) |
342cd0ab | 286 | { |
40c2729b CD |
287 | phys_addr_t phys_addr; |
288 | unsigned long virt_addr; | |
6c41a413 MZ |
289 | unsigned long start = kern_hyp_va((unsigned long)from); |
290 | unsigned long end = kern_hyp_va((unsigned long)to); | |
6060df84 | 291 | |
1e947bad MZ |
292 | if (is_kernel_in_hyp_mode()) |
293 | return 0; | |
294 | ||
40c2729b CD |
295 | start = start & PAGE_MASK; |
296 | end = PAGE_ALIGN(end); | |
6060df84 | 297 | |
40c2729b CD |
298 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { |
299 | int err; | |
6060df84 | 300 | |
40c2729b | 301 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); |
0f9d09b8 | 302 | err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr, |
c8dddecd | 303 | prot); |
40c2729b CD |
304 | if (err) |
305 | return err; | |
306 | } | |
307 | ||
308 | return 0; | |
342cd0ab CD |
309 | } |
310 | ||
dc2e4633 | 311 | static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, |
0f9d09b8 WD |
312 | unsigned long *haddr, |
313 | enum kvm_pgtable_prot prot) | |
342cd0ab | 314 | { |
e3f019b3 MZ |
315 | unsigned long base; |
316 | int ret = 0; | |
6060df84 | 317 | |
bfa79a80 QP |
318 | if (!kvm_host_owns_hyp_mappings()) { |
319 | base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping, | |
320 | phys_addr, size, prot); | |
321 | if (IS_ERR_OR_NULL((void *)base)) | |
322 | return PTR_ERR((void *)base); | |
323 | *haddr = base; | |
324 | ||
325 | return 0; | |
326 | } | |
327 | ||
e3f019b3 | 328 | mutex_lock(&kvm_hyp_pgd_mutex); |
6060df84 | 329 | |
e3f019b3 | 330 | /* |
656012c7 | 331 | * This assumes that we have enough space below the idmap |
e3f019b3 MZ |
332 | * page to allocate our VAs. If not, the check below will |
333 | kick in. A potential alternative would be to detect that | |
334 | * overflow and switch to an allocation above the idmap. | |
335 | * | |
336 | * The allocated size is always a multiple of PAGE_SIZE. | |
337 | */ | |
338 | size = PAGE_ALIGN(size + offset_in_page(phys_addr)); | |
339 | base = io_map_base - size; | |
1bb32a44 | 340 | |
e3f019b3 MZ |
341 | /* |
342 | * Verify that BIT(VA_BITS - 1) hasn't been flipped by | |
343 | * allocating the new area, as it would indicate we've | |
344 | * overflowed the idmap/IO address range. | |
345 | */ | |
346 | if ((base ^ io_map_base) & BIT(VA_BITS - 1)) | |
347 | ret = -ENOMEM; | |
348 | else | |
349 | io_map_base = base; | |
350 | ||
351 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
352 | ||
353 | if (ret) | |
354 | goto out; | |
355 | ||
0f9d09b8 | 356 | ret = __create_hyp_mappings(base, size, phys_addr, prot); |
e3f019b3 MZ |
357 | if (ret) |
358 | goto out; | |
359 | ||
dc2e4633 | 360 | *haddr = base + offset_in_page(phys_addr); |
e3f019b3 | 361 | out: |
dc2e4633 MZ |
362 | return ret; |
363 | } | |
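A minimal standalone sketch (not part of this file; VA_BITS and the numeric values are assumptions) of how the downward io_map_base allocation and the BIT(VA_BITS - 1) check above reject an allocation that would overflow into the idmap/IO half of the VA range:

```c
#include <stdint.h>
#include <stdio.h>

#define VA_BITS 48UL
#define BIT(n)  (1ULL << (n))

int main(void)
{
	/* Pretend the allocator currently sits just above the halfway bit. */
	uint64_t io_map_base = BIT(VA_BITS - 1) + 0x200000;
	uint64_t size = 0x400000;	/* a 4 MiB private mapping */
	uint64_t base = io_map_base - size;

	/* Same test as __create_hyp_private_mapping(): if the subtraction
	 * flipped bit (VA_BITS - 1), the new area would spill out of the
	 * idmap/IO range, so the kernel returns -ENOMEM instead. */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		printf("rejected: would overflow the idmap/IO range\n");
	else
		printf("accepted: new io_map_base = 0x%llx\n",
		       (unsigned long long)base);
	return 0;
}
```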
364 | ||
365 | /** | |
366 | * create_hyp_io_mappings - Map IO into both kernel and HYP | |
367 | * @phys_addr: The physical start address which gets mapped | |
368 | * @size: Size of the region being mapped | |
369 | * @kaddr: Kernel VA for this mapping | |
370 | * @haddr: HYP VA for this mapping | |
371 | */ | |
372 | int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, | |
373 | void __iomem **kaddr, | |
374 | void __iomem **haddr) | |
375 | { | |
376 | unsigned long addr; | |
377 | int ret; | |
378 | ||
379 | *kaddr = ioremap(phys_addr, size); | |
380 | if (!*kaddr) | |
381 | return -ENOMEM; | |
382 | ||
383 | if (is_kernel_in_hyp_mode()) { | |
384 | *haddr = *kaddr; | |
385 | return 0; | |
386 | } | |
387 | ||
388 | ret = __create_hyp_private_mapping(phys_addr, size, | |
389 | &addr, PAGE_HYP_DEVICE); | |
1bb32a44 MZ |
390 | if (ret) { |
391 | iounmap(*kaddr); | |
392 | *kaddr = NULL; | |
dc2e4633 MZ |
393 | *haddr = NULL; |
394 | return ret; | |
395 | } | |
396 | ||
397 | *haddr = (void __iomem *)addr; | |
398 | return 0; | |
399 | } | |
400 | ||
401 | /** | |
402 | * create_hyp_exec_mappings - Map an executable range into HYP | |
403 | * @phys_addr: The physical start address which gets mapped | |
404 | * @size: Size of the region being mapped | |
405 | * @haddr: HYP VA for this mapping | |
406 | */ | |
407 | int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, | |
408 | void **haddr) | |
409 | { | |
410 | unsigned long addr; | |
411 | int ret; | |
412 | ||
413 | BUG_ON(is_kernel_in_hyp_mode()); | |
414 | ||
415 | ret = __create_hyp_private_mapping(phys_addr, size, | |
416 | &addr, PAGE_HYP_EXEC); | |
417 | if (ret) { | |
418 | *haddr = NULL; | |
1bb32a44 MZ |
419 | return ret; |
420 | } | |
421 | ||
dc2e4633 | 422 | *haddr = (void *)addr; |
1bb32a44 | 423 | return 0; |
342cd0ab CD |
424 | } |
425 | ||
7aef0cbc QP |
426 | static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { |
427 | .zalloc_page = stage2_memcache_zalloc_page, | |
428 | .zalloc_pages_exact = kvm_host_zalloc_pages_exact, | |
429 | .free_pages_exact = free_pages_exact, | |
430 | .get_page = kvm_host_get_page, | |
431 | .put_page = kvm_host_put_page, | |
432 | .page_count = kvm_host_page_count, | |
433 | .phys_to_virt = kvm_host_va, | |
434 | .virt_to_phys = kvm_host_pa, | |
435 | }; | |
436 | ||
d5d8184d | 437 | /** |
a0e50aa3 CD |
438 | * kvm_init_stage2_mmu - Initialise an S2 MMU structure |
439 | * @kvm: The pointer to the KVM structure | |
440 | * @mmu: The pointer to the s2 MMU structure | |
d5d8184d | 441 | * |
71233d05 | 442 | * Allocates only the stage-2 HW PGD level table(s). |
d5d8184d CD |
443 | * Note we don't need locking here as this is only called when the VM is |
444 | * created, which can only be done once. | |
445 | */ | |
a0e50aa3 | 446 | int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) |
d5d8184d | 447 | { |
71233d05 WD |
448 | int cpu, err; |
449 | struct kvm_pgtable *pgt; | |
d5d8184d | 450 | |
71233d05 | 451 | if (mmu->pgt != NULL) { |
d5d8184d CD |
452 | kvm_err("kvm_arch already initialized?\n"); |
453 | return -EINVAL; | |
454 | } | |
455 | ||
71233d05 WD |
456 | pgt = kzalloc(sizeof(*pgt), GFP_KERNEL); |
457 | if (!pgt) | |
a987370f MZ |
458 | return -ENOMEM; |
459 | ||
834cd93d | 460 | err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); |
71233d05 WD |
461 | if (err) |
462 | goto out_free_pgtable; | |
e329fb75 | 463 | |
a0e50aa3 CD |
464 | mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran)); |
465 | if (!mmu->last_vcpu_ran) { | |
71233d05 WD |
466 | err = -ENOMEM; |
467 | goto out_destroy_pgtable; | |
a0e50aa3 CD |
468 | } |
469 | ||
470 | for_each_possible_cpu(cpu) | |
471 | *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; | |
472 | ||
cfb1a98d | 473 | mmu->arch = &kvm->arch; |
71233d05 WD |
474 | mmu->pgt = pgt; |
475 | mmu->pgd_phys = __pa(pgt->pgd); | |
a0e50aa3 | 476 | mmu->vmid.vmid_gen = 0; |
d5d8184d | 477 | return 0; |
71233d05 WD |
478 | |
479 | out_destroy_pgtable: | |
480 | kvm_pgtable_stage2_destroy(pgt); | |
481 | out_free_pgtable: | |
482 | kfree(pgt); | |
483 | return err; | |
d5d8184d CD |
484 | } |
485 | ||
957db105 CD |
486 | static void stage2_unmap_memslot(struct kvm *kvm, |
487 | struct kvm_memory_slot *memslot) | |
488 | { | |
489 | hva_t hva = memslot->userspace_addr; | |
490 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
491 | phys_addr_t size = PAGE_SIZE * memslot->npages; | |
492 | hva_t reg_end = hva + size; | |
493 | ||
494 | /* | |
495 | * A memory region could potentially cover multiple VMAs, and any holes | |
496 | * between them, so iterate over all of them to find out if we should | |
497 | * unmap any of them. | |
498 | * | |
499 | * +--------------------------------------------+ | |
500 | * +---------------+----------------+ +----------------+ | |
501 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
502 | * +---------------+----------------+ +----------------+ | |
503 | * | memory region | | |
504 | * +--------------------------------------------+ | |
505 | */ | |
506 | do { | |
c728fd4c | 507 | struct vm_area_struct *vma; |
957db105 CD |
508 | hva_t vm_start, vm_end; |
509 | ||
c728fd4c GS |
510 | vma = find_vma_intersection(current->mm, hva, reg_end); |
511 | if (!vma) | |
957db105 CD |
512 | break; |
513 | ||
514 | /* | |
515 | * Take the intersection of this VMA with the memory region | |
516 | */ | |
517 | vm_start = max(hva, vma->vm_start); | |
518 | vm_end = min(reg_end, vma->vm_end); | |
519 | ||
520 | if (!(vma->vm_flags & VM_PFNMAP)) { | |
521 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); | |
a0e50aa3 | 522 | unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start); |
957db105 CD |
523 | } |
524 | hva = vm_end; | |
525 | } while (hva < reg_end); | |
526 | } | |
527 | ||
528 | /** | |
529 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings | |
530 | * @kvm: The struct kvm pointer | |
531 | * | |
656012c7 | 532 | * Go through the memregions and unmap any regular RAM |
957db105 CD |
533 | * backing memory already mapped to the VM. |
534 | */ | |
535 | void stage2_unmap_vm(struct kvm *kvm) | |
536 | { | |
537 | struct kvm_memslots *slots; | |
538 | struct kvm_memory_slot *memslot; | |
539 | int idx; | |
540 | ||
541 | idx = srcu_read_lock(&kvm->srcu); | |
89154dd5 | 542 | mmap_read_lock(current->mm); |
957db105 CD |
543 | spin_lock(&kvm->mmu_lock); |
544 | ||
545 | slots = kvm_memslots(kvm); | |
546 | kvm_for_each_memslot(memslot, slots) | |
547 | stage2_unmap_memslot(kvm, memslot); | |
548 | ||
549 | spin_unlock(&kvm->mmu_lock); | |
89154dd5 | 550 | mmap_read_unlock(current->mm); |
957db105 CD |
551 | srcu_read_unlock(&kvm->srcu, idx); |
552 | } | |
553 | ||
a0e50aa3 | 554 | void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) |
d5d8184d | 555 | { |
cfb1a98d | 556 | struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); |
71233d05 | 557 | struct kvm_pgtable *pgt = NULL; |
d5d8184d | 558 | |
8b3405e3 | 559 | spin_lock(&kvm->mmu_lock); |
71233d05 WD |
560 | pgt = mmu->pgt; |
561 | if (pgt) { | |
71233d05 WD |
562 | mmu->pgd_phys = 0; |
563 | mmu->pgt = NULL; | |
564 | free_percpu(mmu->last_vcpu_ran); | |
6c0d706b | 565 | } |
8b3405e3 SP |
566 | spin_unlock(&kvm->mmu_lock); |
567 | ||
71233d05 WD |
568 | if (pgt) { |
569 | kvm_pgtable_stage2_destroy(pgt); | |
570 | kfree(pgt); | |
a0e50aa3 | 571 | } |
d5d8184d CD |
572 | } |
573 | ||
d5d8184d CD |
574 | /** |
575 | * kvm_phys_addr_ioremap - map a device range to guest IPA | |
576 | * | |
577 | * @kvm: The KVM pointer | |
578 | * @guest_ipa: The IPA at which to insert the mapping | |
579 | * @pa: The physical address of the device | |
580 | * @size: The size of the mapping | |
c9c0279c | 581 | * @writable: Whether or not to create a writable mapping |
d5d8184d CD |
582 | */ |
583 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | |
c40f2f8f | 584 | phys_addr_t pa, unsigned long size, bool writable) |
d5d8184d | 585 | { |
02bbd374 | 586 | phys_addr_t addr; |
d5d8184d | 587 | int ret = 0; |
c1a33aeb | 588 | struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, }; |
02bbd374 WD |
589 | struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; |
590 | enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE | | |
591 | KVM_PGTABLE_PROT_R | | |
592 | (writable ? KVM_PGTABLE_PROT_W : 0); | |
d5d8184d | 593 | |
02bbd374 WD |
594 | size += offset_in_page(guest_ipa); |
595 | guest_ipa &= PAGE_MASK; | |
c40f2f8f | 596 | |
02bbd374 | 597 | for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) { |
c1a33aeb SC |
598 | ret = kvm_mmu_topup_memory_cache(&cache, |
599 | kvm_mmu_cache_min_pages(kvm)); | |
d5d8184d | 600 | if (ret) |
02bbd374 WD |
601 | break; |
602 | ||
d5d8184d | 603 | spin_lock(&kvm->mmu_lock); |
02bbd374 WD |
604 | ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot, |
605 | &cache); | |
d5d8184d CD |
606 | spin_unlock(&kvm->mmu_lock); |
607 | if (ret) | |
02bbd374 | 608 | break; |
d5d8184d | 609 | |
02bbd374 | 610 | pa += PAGE_SIZE; |
d5d8184d CD |
611 | } |
612 | ||
c1a33aeb | 613 | kvm_mmu_free_memory_cache(&cache); |
d5d8184d CD |
614 | return ret; |
615 | } | |
616 | ||
c6473555 MS |
617 | /** |
618 | * stage2_wp_range() - write protect stage2 memory region range | |
c9c0279c | 619 | * @mmu: The KVM stage-2 MMU pointer |
c6473555 MS |
620 | * @addr: Start address of range |
621 | * @end: End address of range | |
622 | */ | |
a0e50aa3 | 623 | static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) |
c6473555 | 624 | { |
cfb1a98d | 625 | struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); |
cc38d61c | 626 | stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect); |
c6473555 MS |
627 | } |
628 | ||
629 | /** | |
630 | * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot | |
631 | * @kvm: The KVM pointer | |
632 | * @slot: The memory slot to write protect | |
633 | * | |
634 | * Called to start logging dirty pages after memory region | |
635 | * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns | |
4ea5af53 | 636 | * all present PUD, PMD and PTEs are write protected in the memory region. |
c6473555 MS |
637 | * Afterwards read of dirty page log can be called. |
638 | * | |
639 | * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, | |
640 | * serializing operations for VM memory regions. | |
641 | */ | |
eab62148 | 642 | static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) |
c6473555 | 643 | { |
9f6b8029 PB |
644 | struct kvm_memslots *slots = kvm_memslots(kvm); |
645 | struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); | |
0577d1ab SC |
646 | phys_addr_t start, end; |
647 | ||
648 | if (WARN_ON_ONCE(!memslot)) | |
649 | return; | |
650 | ||
651 | start = memslot->base_gfn << PAGE_SHIFT; | |
652 | end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; | |
c6473555 MS |
653 | |
654 | spin_lock(&kvm->mmu_lock); | |
a0e50aa3 | 655 | stage2_wp_range(&kvm->arch.mmu, start, end); |
c6473555 MS |
656 | spin_unlock(&kvm->mmu_lock); |
657 | kvm_flush_remote_tlbs(kvm); | |
658 | } | |
53c810c3 MS |
659 | |
660 | /** | |
3b0f1d01 | 661 | * kvm_mmu_write_protect_pt_masked() - write protect dirty pages |
53c810c3 MS |
662 | * @kvm: The KVM pointer |
663 | * @slot: The memory slot associated with mask | |
664 | * @gfn_offset: The gfn offset in memory slot | |
665 | * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory | |
666 | * slot to be write protected | |
667 | * | |
668 | * Walks the bits set in mask and write protects the associated PTEs. Caller must | |
669 | * acquire kvm_mmu_lock. | |
670 | */ | |
3b0f1d01 | 671 | static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, |
53c810c3 MS |
672 | struct kvm_memory_slot *slot, |
673 | gfn_t gfn_offset, unsigned long mask) | |
674 | { | |
675 | phys_addr_t base_gfn = slot->base_gfn + gfn_offset; | |
676 | phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; | |
677 | phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; | |
678 | ||
a0e50aa3 | 679 | stage2_wp_range(&kvm->arch.mmu, start, end); |
53c810c3 | 680 | } |
c6473555 | 681 | |
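A quick standalone sketch (illustration only, not kernel code; the base_gfn, mask, and page-shift values are assumptions) of how the dirty-log mask above is converted into the [start, end) IPA range handed to stage2_wp_range():

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base_gfn = 0x1000;		/* slot->base_gfn + gfn_offset */
	unsigned long mask = 0xf0f0UL;		/* chunk of the dirty bitmap */
	unsigned int page_shift = 12;		/* PAGE_SHIFT for 4 KiB pages */

	/* __ffs()/__fls() in the kernel: lowest and highest set bit. */
	unsigned int lo = __builtin_ctzl(mask);
	unsigned int hi = (unsigned int)(8 * sizeof(mask) - 1) - __builtin_clzl(mask);

	uint64_t start = (base_gfn + lo) << page_shift;
	uint64_t end   = (base_gfn + hi + 1) << page_shift;

	printf("write-protect IPA range [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
```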
3b0f1d01 KH |
682 | /* |
683 | * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected | |
684 | * dirty pages. | |
685 | * | |
686 | * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to | |
687 | * enable dirty logging for them. | |
688 | */ | |
689 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, | |
690 | struct kvm_memory_slot *slot, | |
691 | gfn_t gfn_offset, unsigned long mask) | |
692 | { | |
693 | kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); | |
694 | } | |
695 | ||
17ab9d57 | 696 | static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) |
0d3e4d4f | 697 | { |
17ab9d57 | 698 | __clean_dcache_guest_page(pfn, size); |
a15f6939 MZ |
699 | } |
700 | ||
17ab9d57 | 701 | static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size) |
a15f6939 | 702 | { |
17ab9d57 | 703 | __invalidate_icache_guest_page(pfn, size); |
0d3e4d4f MZ |
704 | } |
705 | ||
1559b758 | 706 | static void kvm_send_hwpoison_signal(unsigned long address, short lsb) |
196f878a | 707 | { |
795a8371 | 708 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); |
196f878a JM |
709 | } |
710 | ||
a80868f3 SP |
711 | static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, |
712 | unsigned long hva, | |
713 | unsigned long map_size) | |
6794ad54 | 714 | { |
c2be79a0 | 715 | gpa_t gpa_start; |
6794ad54 CD |
716 | hva_t uaddr_start, uaddr_end; |
717 | size_t size; | |
718 | ||
9f283614 SP |
719 | /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ |
720 | if (map_size == PAGE_SIZE) | |
721 | return true; | |
722 | ||
6794ad54 CD |
723 | size = memslot->npages * PAGE_SIZE; |
724 | ||
725 | gpa_start = memslot->base_gfn << PAGE_SHIFT; | |
6794ad54 CD |
726 | |
727 | uaddr_start = memslot->userspace_addr; | |
728 | uaddr_end = uaddr_start + size; | |
729 | ||
730 | /* | |
731 | * Pages belonging to memslots that don't have the same alignment | |
a80868f3 SP |
732 | * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 |
733 | * PMD/PUD entries, because we'll end up mapping the wrong pages. | |
6794ad54 CD |
734 | * |
735 | * Consider a layout like the following: | |
736 | * | |
737 | * memslot->userspace_addr: | |
738 | * +-----+--------------------+--------------------+---+ | |
a80868f3 | 739 | * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| |
6794ad54 CD |
740 | * +-----+--------------------+--------------------+---+ |
741 | * | |
9f283614 | 742 | * memslot->base_gfn << PAGE_SHIFT: |
6794ad54 | 743 | * +---+--------------------+--------------------+-----+ |
a80868f3 | 744 | * |abc|def Stage-2 block | Stage-2 block |tvxyz| |
6794ad54 CD |
745 | * +---+--------------------+--------------------+-----+ |
746 | * | |
a80868f3 | 747 | * If we create those stage-2 blocks, we'll end up with this incorrect |
6794ad54 CD |
748 | * mapping: |
749 | * d -> f | |
750 | * e -> g | |
751 | * f -> h | |
752 | */ | |
a80868f3 | 753 | if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) |
6794ad54 CD |
754 | return false; |
755 | ||
756 | /* | |
757 | * Next, let's make sure we're not trying to map anything not covered | |
a80868f3 SP |
758 | * by the memslot. This means we have to prohibit block size mappings |
759 | * for the beginning and end of a non-block aligned and non-block sized | |
6794ad54 CD |
760 | * memory slot (illustrated by the head and tail parts of the |
761 | * userspace view above containing pages 'abcde' and 'xyz', | |
762 | * respectively). | |
763 | * | |
764 | * Note that it doesn't matter if we do the check using the | |
765 | * userspace_addr or the base_gfn, as both are equally aligned (per | |
766 | * the check above) and equally sized. | |
767 | */ | |
a80868f3 SP |
768 | return (hva & ~(map_size - 1)) >= uaddr_start && |
769 | (hva & ~(map_size - 1)) + map_size <= uaddr_end; | |
6794ad54 CD |
770 | } |
771 | ||
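A small standalone sketch (not kernel code; the example addresses are made up) of the alignment rule enforced above: the guest IPA and the userspace address must share the same offset within a candidate block before a PMD/PUD-sized stage-2 mapping is allowed:

```c
#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)	/* 2 MiB block, as with 4 KiB pages */

/* Mirrors the (gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))
 * test in fault_supports_stage2_huge_mapping(). */
static int same_block_offset(uint64_t gpa_start, uint64_t uaddr_start)
{
	return (gpa_start & (PMD_SIZE - 1)) == (uaddr_start & (PMD_SIZE - 1));
}

int main(void)
{
	/* Both 2 MiB aligned: a block mapping is possible. */
	printf("%d\n", same_block_offset(0x80000000ULL, 0x7f6000000ULL));
	/* IPA offset by 4 KiB within the block: must fall back to pages. */
	printf("%d\n", same_block_offset(0x80001000ULL, 0x7f6000000ULL));
	return 0;
}
```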
0529c902 SP |
772 | /* |
773 | * Check if the given hva is backed by a transparent huge page (THP) and | |
774 | * whether it can be mapped using block mapping in stage2. If so, adjust | |
775 | * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently | |
776 | * supported. This will need to be updated to support other THP sizes. | |
777 | * | |
778 | * Returns the size of the mapping. | |
779 | */ | |
780 | static unsigned long | |
781 | transparent_hugepage_adjust(struct kvm_memory_slot *memslot, | |
782 | unsigned long hva, kvm_pfn_t *pfnp, | |
783 | phys_addr_t *ipap) | |
784 | { | |
785 | kvm_pfn_t pfn = *pfnp; | |
786 | ||
787 | /* | |
788 | * Make sure the adjustment is done only for THP pages. Also make | |
789 | * sure that the HVA and IPA are sufficiently aligned and that the | |
790 | * block map is contained within the memslot. | |
791 | */ | |
792 | if (kvm_is_transparent_hugepage(pfn) && | |
793 | fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { | |
794 | /* | |
795 | * The address we faulted on is backed by a transparent huge | |
796 | * page. However, because we map the compound huge page and | |
797 | * not the individual tail page, we need to transfer the | |
798 | * refcount to the head page. We have to be careful that the | |
799 | * THP doesn't start to split while we are adjusting the | |
800 | * refcounts. | |
801 | * | |
802 | * We are sure this doesn't happen, because mmu_notifier_retry | |
803 | * was successful and we are holding the mmu_lock, so if this | |
804 | * THP is trying to split, it will be blocked in the mmu | |
805 | * notifier before touching any of the pages, specifically | |
806 | * before being able to call __split_huge_page_refcount(). | |
807 | * | |
808 | * We can therefore safely transfer the refcount from PG_tail | |
809 | * to PG_head and switch the pfn from a tail page to the head | |
810 | * page accordingly. | |
811 | */ | |
812 | *ipap &= PMD_MASK; | |
813 | kvm_release_pfn_clean(pfn); | |
814 | pfn &= ~(PTRS_PER_PMD - 1); | |
815 | kvm_get_pfn(pfn); | |
816 | *pfnp = pfn; | |
817 | ||
818 | return PMD_SIZE; | |
819 | } | |
820 | ||
821 | /* Use page mapping if we cannot use block mapping. */ | |
822 | return PAGE_SIZE; | |
823 | } | |
824 | ||
94f8e641 | 825 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
98047888 | 826 | struct kvm_memory_slot *memslot, unsigned long hva, |
94f8e641 CD |
827 | unsigned long fault_status) |
828 | { | |
ffd1b63a | 829 | int ret = 0; |
6396b852 | 830 | bool write_fault, writable, force_pte = false; |
6f745f1b WD |
831 | bool exec_fault; |
832 | bool device = false; | |
94f8e641 | 833 | unsigned long mmu_seq; |
ad361f09 | 834 | struct kvm *kvm = vcpu->kvm; |
94f8e641 | 835 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
ad361f09 | 836 | struct vm_area_struct *vma; |
1559b758 | 837 | short vma_shift; |
6f745f1b | 838 | gfn_t gfn; |
ba049e93 | 839 | kvm_pfn_t pfn; |
15a49a44 | 840 | bool logging_active = memslot_is_logging(memslot); |
7d894834 YW |
841 | unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); |
842 | unsigned long vma_pagesize, fault_granule; | |
6f745f1b WD |
843 | enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; |
844 | struct kvm_pgtable *pgt; | |
94f8e641 | 845 | |
7d894834 | 846 | fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); |
a7d079ce | 847 | write_fault = kvm_is_write_fault(vcpu); |
c4ad98e4 | 848 | exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); |
d0e22b4a MZ |
849 | VM_BUG_ON(write_fault && exec_fault); |
850 | ||
851 | if (fault_status == FSC_PERM && !write_fault && !exec_fault) { | |
94f8e641 CD |
852 | kvm_err("Unexpected L2 read permission error\n"); |
853 | return -EFAULT; | |
854 | } | |
855 | ||
ad361f09 | 856 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
89154dd5 | 857 | mmap_read_lock(current->mm); |
ad361f09 | 858 | vma = find_vma_intersection(current->mm, hva, hva + 1); |
37b54408 AB |
859 | if (unlikely(!vma)) { |
860 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); | |
89154dd5 | 861 | mmap_read_unlock(current->mm); |
37b54408 AB |
862 | return -EFAULT; |
863 | } | |
864 | ||
1559b758 JM |
865 | if (is_vm_hugetlb_page(vma)) |
866 | vma_shift = huge_page_shift(hstate_vma(vma)); | |
867 | else | |
868 | vma_shift = PAGE_SHIFT; | |
869 | ||
a80868f3 | 870 | if (logging_active || |
523b3999 | 871 | (vma->vm_flags & VM_PFNMAP)) { |
a80868f3 | 872 | force_pte = true; |
523b3999 AE |
873 | vma_shift = PAGE_SHIFT; |
874 | } | |
875 | ||
2f40c460 | 876 | switch (vma_shift) { |
faf00039 | 877 | #ifndef __PAGETABLE_PMD_FOLDED |
2f40c460 GS |
878 | case PUD_SHIFT: |
879 | if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) | |
880 | break; | |
881 | fallthrough; | |
faf00039 | 882 | #endif |
2f40c460 GS |
883 | case CONT_PMD_SHIFT: |
884 | vma_shift = PMD_SHIFT; | |
885 | fallthrough; | |
886 | case PMD_SHIFT: | |
887 | if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) | |
888 | break; | |
889 | fallthrough; | |
890 | case CONT_PTE_SHIFT: | |
523b3999 | 891 | vma_shift = PAGE_SHIFT; |
2f40c460 GS |
892 | force_pte = true; |
893 | fallthrough; | |
894 | case PAGE_SHIFT: | |
895 | break; | |
896 | default: | |
897 | WARN_ONCE(1, "Unknown vma_shift %d", vma_shift); | |
a80868f3 SP |
898 | } |
899 | ||
523b3999 | 900 | vma_pagesize = 1UL << vma_shift; |
6f745f1b | 901 | if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) |
523b3999 | 902 | fault_ipa &= ~(vma_pagesize - 1); |
6f745f1b WD |
903 | |
904 | gfn = fault_ipa >> PAGE_SHIFT; | |
89154dd5 | 905 | mmap_read_unlock(current->mm); |
ad361f09 | 906 | |
6f745f1b WD |
907 | /* |
908 | * Permission faults just need to update the existing leaf entry, | |
909 | * and so normally don't require allocations from the memcache. The | |
910 | * only exception to this is when dirty logging is enabled at runtime | |
911 | * and a write fault needs to collapse a block entry into a table. | |
912 | */ | |
913 | if (fault_status != FSC_PERM || (logging_active && write_fault)) { | |
914 | ret = kvm_mmu_topup_memory_cache(memcache, | |
915 | kvm_mmu_cache_min_pages(kvm)); | |
916 | if (ret) | |
917 | return ret; | |
918 | } | |
94f8e641 CD |
919 | |
920 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | |
921 | /* | |
922 | * Ensure the read of mmu_notifier_seq happens before we call | |
923 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk | |
924 | * the page we just got a reference to getting unmapped before we have a | |
925 | * chance to grab the mmu_lock, which ensures that if the page gets | |
cd4c7183 | 926 | * unmapped afterwards, the call to kvm_unmap_gfn will take it away |
94f8e641 CD |
927 | * from us again properly. This smp_rmb() interacts with the smp_wmb() |
928 | * in kvm_mmu_notifier_invalidate_<page|range_end>. | |
10ba2d17 GS |
929 | * |
930 | * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is | |
931 | * used to avoid unnecessary overhead introduced to locate the memory | |
932 | * slot because it's always fixed even @gfn is adjusted for huge pages. | |
94f8e641 CD |
933 | */ |
934 | smp_rmb(); | |
935 | ||
10ba2d17 GS |
936 | pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, |
937 | write_fault, &writable, NULL); | |
196f878a | 938 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
1559b758 | 939 | kvm_send_hwpoison_signal(hva, vma_shift); |
196f878a JM |
940 | return 0; |
941 | } | |
9ac71595 | 942 | if (is_error_noslot_pfn(pfn)) |
94f8e641 CD |
943 | return -EFAULT; |
944 | ||
15a49a44 | 945 | if (kvm_is_device_pfn(pfn)) { |
6f745f1b | 946 | device = true; |
91a2c34b | 947 | force_pte = true; |
6f745f1b | 948 | } else if (logging_active && !write_fault) { |
15a49a44 MS |
949 | /* |
950 | * Only actually map the page as writable if this was a write | |
951 | * fault. | |
952 | */ | |
6f745f1b | 953 | writable = false; |
15a49a44 | 954 | } |
b8865767 | 955 | |
6f745f1b | 956 | if (exec_fault && device) |
6d674e28 MZ |
957 | return -ENOEXEC; |
958 | ||
ad361f09 | 959 | spin_lock(&kvm->mmu_lock); |
6f745f1b | 960 | pgt = vcpu->arch.hw_mmu->pgt; |
ad361f09 | 961 | if (mmu_notifier_retry(kvm, mmu_seq)) |
94f8e641 | 962 | goto out_unlock; |
15a49a44 | 963 | |
0529c902 SP |
964 | /* |
965 | * If we are not forced to use page mapping, check if we are | |
966 | * backed by a THP and thus use block mapping if possible. | |
967 | */ | |
968 | if (vma_pagesize == PAGE_SIZE && !force_pte) | |
969 | vma_pagesize = transparent_hugepage_adjust(memslot, hva, | |
970 | &pfn, &fault_ipa); | |
509552e6 | 971 | if (writable) |
6f745f1b | 972 | prot |= KVM_PGTABLE_PROT_W; |
ad361f09 | 973 | |
6f745f1b | 974 | if (fault_status != FSC_PERM && !device) |
3f58bf63 PA |
975 | clean_dcache_guest_page(pfn, vma_pagesize); |
976 | ||
6f745f1b WD |
977 | if (exec_fault) { |
978 | prot |= KVM_PGTABLE_PROT_X; | |
3f58bf63 | 979 | invalidate_icache_guest_page(pfn, vma_pagesize); |
6f745f1b | 980 | } |
3f58bf63 | 981 | |
6f745f1b WD |
982 | if (device) |
983 | prot |= KVM_PGTABLE_PROT_DEVICE; | |
984 | else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) | |
985 | prot |= KVM_PGTABLE_PROT_X; | |
a15f6939 | 986 | |
7d894834 YW |
987 | /* |
988 | * Under the premise of getting a FSC_PERM fault, we just need to relax | |
989 | * permissions only if vma_pagesize equals fault_granule. Otherwise, | |
990 | * kvm_pgtable_stage2_map() should be called to change block size. | |
991 | */ | |
992 | if (fault_status == FSC_PERM && vma_pagesize == fault_granule) { | |
6f745f1b | 993 | ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); |
ad361f09 | 994 | } else { |
6f745f1b WD |
995 | ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, |
996 | __pfn_to_phys(pfn), prot, | |
997 | memcache); | |
94f8e641 | 998 | } |
ad361f09 | 999 | |
509552e6 YW |
1000 | /* Mark the page dirty only if the fault is handled successfully */ |
1001 | if (writable && !ret) { | |
1002 | kvm_set_pfn_dirty(pfn); | |
10ba2d17 | 1003 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
509552e6 YW |
1004 | } |
1005 | ||
94f8e641 | 1006 | out_unlock: |
ad361f09 | 1007 | spin_unlock(&kvm->mmu_lock); |
35307b9a | 1008 | kvm_set_pfn_accessed(pfn); |
94f8e641 | 1009 | kvm_release_pfn_clean(pfn); |
509552e6 | 1010 | return ret != -EAGAIN ? ret : 0; |
94f8e641 CD |
1011 | } |
1012 | ||
ee8efad7 | 1013 | /* Resolve the access fault by making the page young again. */ |
aeda9130 MZ |
1014 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) |
1015 | { | |
ee8efad7 WD |
1016 | pte_t pte; |
1017 | kvm_pte_t kpte; | |
1018 | struct kvm_s2_mmu *mmu; | |
aeda9130 MZ |
1019 | |
1020 | trace_kvm_access_fault(fault_ipa); | |
1021 | ||
1022 | spin_lock(&vcpu->kvm->mmu_lock); | |
ee8efad7 WD |
1023 | mmu = vcpu->arch.hw_mmu; |
1024 | kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa); | |
aeda9130 | 1025 | spin_unlock(&vcpu->kvm->mmu_lock); |
ee8efad7 WD |
1026 | |
1027 | pte = __pte(kpte); | |
1028 | if (pte_valid(pte)) | |
1029 | kvm_set_pfn_accessed(pte_pfn(pte)); | |
aeda9130 MZ |
1030 | } |
1031 | ||
94f8e641 CD |
1032 | /** |
1033 | * kvm_handle_guest_abort - handles all 2nd stage aborts | |
1034 | * @vcpu: the VCPU pointer | |
94f8e641 CD |
1035 | * |
1036 | * Any abort that gets to the host is almost guaranteed to be caused by a | |
1037 | * missing second stage translation table entry, which can mean either that the |
1038 | * guest simply needs more memory and we must allocate an appropriate page, or |
1039 | * that the guest tried to access I/O memory, which is emulated by user |
1040 | * space. The distinction is based on the IPA causing the fault and whether this | |
1041 | * memory region has been registered as standard RAM by user space. | |
1042 | */ | |
74cc7e0c | 1043 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) |
342cd0ab | 1044 | { |
94f8e641 CD |
1045 | unsigned long fault_status; |
1046 | phys_addr_t fault_ipa; | |
1047 | struct kvm_memory_slot *memslot; | |
98047888 CD |
1048 | unsigned long hva; |
1049 | bool is_iabt, write_fault, writable; | |
94f8e641 CD |
1050 | gfn_t gfn; |
1051 | int ret, idx; | |
1052 | ||
621f48e4 TB |
1053 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
1054 | ||
1055 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); | |
bb428921 | 1056 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
621f48e4 | 1057 | |
bb428921 | 1058 | /* Synchronous External Abort? */ |
c9a636f2 | 1059 | if (kvm_vcpu_abt_issea(vcpu)) { |
bb428921 JM |
1060 | /* |
1061 | * For RAS the host kernel may handle this abort. | |
1062 | * There is no need to pass the error into the guest. | |
1063 | */ | |
84b951a8 | 1064 | if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu))) |
bb428921 | 1065 | kvm_inject_vabt(vcpu); |
84b951a8 WD |
1066 | |
1067 | return 1; | |
4055710b MZ |
1068 | } |
1069 | ||
3a949f4c | 1070 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu), |
7393b599 | 1071 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
94f8e641 CD |
1072 | |
1073 | /* Check the stage-2 fault is trans. fault or write fault */ | |
35307b9a MZ |
1074 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM && |
1075 | fault_status != FSC_ACCESS) { | |
0496daa5 CD |
1076 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
1077 | kvm_vcpu_trap_get_class(vcpu), | |
1078 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | |
3a949f4c | 1079 | (unsigned long)kvm_vcpu_get_esr(vcpu)); |
94f8e641 CD |
1080 | return -EFAULT; |
1081 | } | |
1082 | ||
1083 | idx = srcu_read_lock(&vcpu->kvm->srcu); | |
1084 | ||
1085 | gfn = fault_ipa >> PAGE_SHIFT; | |
98047888 CD |
1086 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
1087 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); | |
a7d079ce | 1088 | write_fault = kvm_is_write_fault(vcpu); |
98047888 | 1089 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
022c8328 WD |
1090 | /* |
1091 | * The guest has put either its instructions or its page-tables | |
1092 | * somewhere it shouldn't have. Userspace won't be able to do | |
1093 | * anything about this (there's no syndrome for a start), so | |
1094 | * re-inject the abort back into the guest. | |
1095 | */ | |
94f8e641 | 1096 | if (is_iabt) { |
6d674e28 MZ |
1097 | ret = -ENOEXEC; |
1098 | goto out; | |
94f8e641 CD |
1099 | } |
1100 | ||
c4ad98e4 | 1101 | if (kvm_vcpu_abt_iss1tw(vcpu)) { |
022c8328 WD |
1102 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
1103 | ret = 1; | |
1104 | goto out_unlock; | |
1105 | } | |
1106 | ||
57c841f1 MZ |
1107 | /* |
1108 | * Check for a cache maintenance operation. Since we | |
1109 | * ended-up here, we know it is outside of any memory | |
1110 | * slot. But we can't find out if that is for a device, | |
1111 | * or if the guest is just being stupid. The only thing | |
1112 | * we know for sure is that this range cannot be cached. | |
1113 | * | |
1114 | * So let's assume that the guest is just being | |
1115 | * cautious, and skip the instruction. | |
1116 | */ | |
54dc0d24 | 1117 | if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) { |
cdb5e02e | 1118 | kvm_incr_pc(vcpu); |
57c841f1 MZ |
1119 | ret = 1; |
1120 | goto out_unlock; | |
1121 | } | |
1122 | ||
cfe3950c MZ |
1123 | /* |
1124 | * The IPA is reported as [MAX:12], so we need to | |
1125 | * complement it with the bottom 12 bits from the | |
1126 | * faulting VA. This is always 12 bits, irrespective | |
1127 | * of the page size. | |
1128 | */ | |
1129 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | |
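/*
 * Illustration only (values made up): if the reported IPA is
 * 0x8765000 and HFAR ends in 0xabc, the OR above yields the exact
 * faulting IPA 0x8765abc, independent of the stage-2 page size.
 */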
74cc7e0c | 1130 | ret = io_mem_abort(vcpu, fault_ipa); |
94f8e641 CD |
1131 | goto out_unlock; |
1132 | } | |
1133 | ||
c3058d5d | 1134 | /* Userspace should not be able to register out-of-bounds IPAs */ |
e55cac5b | 1135 | VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); |
c3058d5d | 1136 | |
aeda9130 MZ |
1137 | if (fault_status == FSC_ACCESS) { |
1138 | handle_access_fault(vcpu, fault_ipa); | |
1139 | ret = 1; | |
1140 | goto out_unlock; | |
1141 | } | |
1142 | ||
98047888 | 1143 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
94f8e641 CD |
1144 | if (ret == 0) |
1145 | ret = 1; | |
6d674e28 MZ |
1146 | out: |
1147 | if (ret == -ENOEXEC) { | |
1148 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | |
1149 | ret = 1; | |
1150 | } | |
94f8e641 CD |
1151 | out_unlock: |
1152 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
1153 | return ret; | |
342cd0ab CD |
1154 | } |
1155 | ||
cd4c7183 | 1156 | bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) |
d5d8184d | 1157 | { |
cd4c7183 | 1158 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1159 | return false; |
d5d8184d | 1160 | |
cd4c7183 SC |
1161 | __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT, |
1162 | (range->end - range->start) << PAGE_SHIFT, | |
1163 | range->may_block); | |
b5331379 | 1164 | |
fcb82839 | 1165 | return false; |
d5d8184d CD |
1166 | } |
1167 | ||
cd4c7183 | 1168 | bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
d5d8184d | 1169 | { |
cd4c7183 SC |
1170 | kvm_pfn_t pfn = pte_pfn(range->pte); |
1171 | ||
063deeb1 | 1172 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1173 | return false; |
d5d8184d | 1174 | |
cd4c7183 | 1175 | WARN_ON(range->end - range->start != 1); |
d5d8184d | 1176 | |
cd4c7183 SC |
1177 | /* |
1178 | * We've moved a page around, probably through CoW, so let's treat it | |
1179 | * just like a translation fault and clean the cache to the PoC. | |
1180 | */ | |
1181 | clean_dcache_guest_page(pfn, PAGE_SIZE); | |
e9edb17a | 1182 | |
15a49a44 | 1183 | /* |
e9edb17a | 1184 | * The MMU notifiers will have unmapped a huge PMD before calling |
cd4c7183 | 1185 | * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and |
e9edb17a WD |
1186 | * therefore we never need to clear out a huge PMD through this |
1187 | * calling path and a memcache is not required. | |
15a49a44 | 1188 | */ |
cd4c7183 SC |
1189 | kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, |
1190 | PAGE_SIZE, __pfn_to_phys(pfn), | |
1191 | KVM_PGTABLE_PROT_R, NULL); | |
1192 | ||
fcb82839 | 1193 | return false; |
d5d8184d CD |
1194 | } |
1195 | ||
cd4c7183 | 1196 | bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
d5d8184d | 1197 | { |
cd4c7183 SC |
1198 | u64 size = (range->end - range->start) << PAGE_SHIFT; |
1199 | kvm_pte_t kpte; | |
1200 | pte_t pte; | |
d5d8184d | 1201 | |
e9edb17a | 1202 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1203 | return false; |
d5d8184d | 1204 | |
35a63966 | 1205 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); |
cd4c7183 SC |
1206 | |
1207 | kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, | |
1208 | range->start << PAGE_SHIFT); | |
ee8efad7 WD |
1209 | pte = __pte(kpte); |
1210 | return pte_valid(pte) && pte_young(pte); | |
35307b9a MZ |
1211 | } |
1212 | ||
cd4c7183 | 1213 | bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
35307b9a | 1214 | { |
063deeb1 | 1215 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1216 | return false; |
501b9185 | 1217 | |
cd4c7183 SC |
1218 | return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, |
1219 | range->start << PAGE_SHIFT); | |
35307b9a MZ |
1220 | } |
1221 | ||
342cd0ab CD |
1222 | phys_addr_t kvm_mmu_get_httbr(void) |
1223 | { | |
0f9d09b8 | 1224 | return __pa(hyp_pgtable->pgd); |
342cd0ab CD |
1225 | } |
1226 | ||
5a677ce0 MZ |
1227 | phys_addr_t kvm_get_idmap_vector(void) |
1228 | { | |
1229 | return hyp_idmap_vector; | |
1230 | } | |
1231 | ||
0f9d09b8 | 1232 | static int kvm_map_idmap_text(void) |
0535a3e2 | 1233 | { |
0f9d09b8 WD |
1234 | unsigned long size = hyp_idmap_end - hyp_idmap_start; |
1235 | int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start, | |
1236 | PAGE_HYP_EXEC); | |
0535a3e2 MZ |
1237 | if (err) |
1238 | kvm_err("Failed to idmap %lx-%lx\n", | |
1239 | hyp_idmap_start, hyp_idmap_end); | |
1240 | ||
1241 | return err; | |
1242 | } | |
1243 | ||
7aef0cbc QP |
1244 | static void *kvm_hyp_zalloc_page(void *arg) |
1245 | { | |
1246 | return (void *)get_zeroed_page(GFP_KERNEL); | |
1247 | } | |
1248 | ||
1249 | static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { | |
1250 | .zalloc_page = kvm_hyp_zalloc_page, | |
1251 | .get_page = kvm_host_get_page, | |
1252 | .put_page = kvm_host_put_page, | |
1253 | .phys_to_virt = kvm_host_va, | |
1254 | .virt_to_phys = kvm_host_pa, | |
1255 | }; | |
1256 | ||
bfa79a80 | 1257 | int kvm_mmu_init(u32 *hyp_va_bits) |
342cd0ab | 1258 | { |
2fb41059 MZ |
1259 | int err; |
1260 | ||
0a78791c | 1261 | hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); |
46fef158 | 1262 | hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); |
0a78791c | 1263 | hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end); |
46fef158 | 1264 | hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE); |
0a78791c | 1265 | hyp_idmap_vector = __pa_symbol(__kvm_hyp_init); |
5a677ce0 | 1266 | |
06f75a1f AB |
1267 | /* |
1268 | * We rely on the linker script to ensure at build time that the HYP | |
1269 | * init code does not cross a page boundary. | |
1270 | */ | |
1271 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); | |
5a677ce0 | 1272 | |
bfa79a80 QP |
1273 | *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); |
1274 | kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits); | |
b4ef0499 MZ |
1275 | kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); |
1276 | kvm_debug("HYP VA range: %lx:%lx\n", | |
1277 | kern_hyp_va(PAGE_OFFSET), | |
1278 | kern_hyp_va((unsigned long)high_memory - 1)); | |
eac378a9 | 1279 | |
6c41a413 | 1280 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && |
ed57cac8 | 1281 | hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) && |
d2896d4b | 1282 | hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) { |
eac378a9 MZ |
1283 | /* |
1284 | * The idmap page is intersecting with the VA space, | |
1285 | * it is not safe to continue further. | |
1286 | */ | |
1287 | kvm_err("IDMAP intersecting with HYP VA, unable to continue\n"); | |
1288 | err = -EINVAL; | |
1289 | goto out; | |
1290 | } | |
1291 | ||
0f9d09b8 WD |
1292 | hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL); |
1293 | if (!hyp_pgtable) { | |
1294 | kvm_err("Hyp mode page-table not allocated\n"); | |
2fb41059 MZ |
1295 | err = -ENOMEM; |
1296 | goto out; | |
1297 | } | |
1298 | ||
bfa79a80 | 1299 | err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops); |
0f9d09b8 WD |
1300 | if (err) |
1301 | goto out_free_pgtable; | |
d5d8184d | 1302 | |
0f9d09b8 WD |
1303 | err = kvm_map_idmap_text(); |
1304 | if (err) | |
1305 | goto out_destroy_pgtable; | |
5a677ce0 | 1306 | |
e3f019b3 | 1307 | io_map_base = hyp_idmap_start; |
d5d8184d | 1308 | return 0; |
0f9d09b8 WD |
1309 | |
1310 | out_destroy_pgtable: | |
1311 | kvm_pgtable_hyp_destroy(hyp_pgtable); | |
1312 | out_free_pgtable: | |
1313 | kfree(hyp_pgtable); | |
1314 | hyp_pgtable = NULL; | |
2fb41059 | 1315 | out: |
2fb41059 | 1316 | return err; |
342cd0ab | 1317 | } |
df6ce24f EA |
1318 | |
1319 | void kvm_arch_commit_memory_region(struct kvm *kvm, | |
09170a49 | 1320 | const struct kvm_userspace_memory_region *mem, |
9d4c197c | 1321 | struct kvm_memory_slot *old, |
f36f3f28 | 1322 | const struct kvm_memory_slot *new, |
df6ce24f EA |
1323 | enum kvm_mr_change change) |
1324 | { | |
c6473555 MS |
1325 | /* |
1326 | * At this point memslot has been committed and there is an | |
656012c7 | 1327 | * allocated dirty_bitmap[]; dirty pages will be tracked while the |
c6473555 MS |
1328 | * memory slot is write protected. |
1329 | */ | |
c862626e KZ |
1330 | if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
1331 | /* | |
1332 | * If we're with initial-all-set, we don't need to write | |
1333 | * protect any pages because they're all reported as dirty. | |
1334 | * Huge pages and normal pages will be write protect gradually. | |
1335 | */ | |
1336 | if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) { | |
1337 | kvm_mmu_wp_memory_region(kvm, mem->slot); | |
1338 | } | |
1339 | } | |
df6ce24f EA |
1340 | } |
1341 | ||
1342 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | |
1343 | struct kvm_memory_slot *memslot, | |
09170a49 | 1344 | const struct kvm_userspace_memory_region *mem, |
df6ce24f EA |
1345 | enum kvm_mr_change change) |
1346 | { | |
8eef9123 AB |
1347 | hva_t hva = mem->userspace_addr; |
1348 | hva_t reg_end = hva + mem->memory_size; | |
1349 | bool writable = !(mem->flags & KVM_MEM_READONLY); | |
1350 | int ret = 0; | |
1351 | ||
15a49a44 MS |
1352 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && |
1353 | change != KVM_MR_FLAGS_ONLY) | |
8eef9123 AB |
1354 | return 0; |
1355 | ||
c3058d5d CD |
1356 | /* |
1357 | * Prevent userspace from creating a memory region outside of the IPA | |
1358 | * space addressable by the KVM guest IPA space. | |
1359 | */ | |
262b003d | 1360 | if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) |
c3058d5d CD |
1361 | return -EFAULT; |
1362 | ||
89154dd5 | 1363 | mmap_read_lock(current->mm); |
8eef9123 AB |
1364 | /* |
1365 | * A memory region could potentially cover multiple VMAs, and any holes | |
1366 | * between them, so iterate over all of them to find out if we can map | |
1367 | * any of them right now. | |
1368 | * | |
1369 | * +--------------------------------------------+ | |
1370 | * +---------------+----------------+ +----------------+ | |
1371 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
1372 | * +---------------+----------------+ +----------------+ | |
1373 | * | memory region | | |
1374 | * +--------------------------------------------+ | |
1375 | */ | |
1376 | do { | |
c728fd4c | 1377 | struct vm_area_struct *vma; |
8eef9123 AB |
1378 | hva_t vm_start, vm_end; |
1379 | ||
c728fd4c GS |
1380 | vma = find_vma_intersection(current->mm, hva, reg_end); |
1381 | if (!vma) | |
8eef9123 AB |
1382 | break; |
1383 | ||
8eef9123 AB |
1384 | /* |
1385 | * Take the intersection of this VMA with the memory region | |
1386 | */ | |
1387 | vm_start = max(hva, vma->vm_start); | |
1388 | vm_end = min(reg_end, vma->vm_end); | |
1389 | ||
1390 | if (vma->vm_flags & VM_PFNMAP) { | |
1391 | gpa_t gpa = mem->guest_phys_addr + | |
1392 | (vm_start - mem->userspace_addr); | |
ca09f02f MM |
1393 | phys_addr_t pa; |
1394 | ||
1395 | pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; | |
1396 | pa += vm_start - vma->vm_start; | |
8eef9123 | 1397 | |
15a49a44 | 1398 | /* IO region dirty page logging not allowed */ |
72f31048 MZ |
1399 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
1400 | ret = -EINVAL; | |
1401 | goto out; | |
1402 | } | |
15a49a44 | 1403 | |
8eef9123 AB |
1404 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, |
1405 | vm_end - vm_start, | |
1406 | writable); | |
1407 | if (ret) | |
1408 | break; | |
1409 | } | |
1410 | hva = vm_end; | |
1411 | } while (hva < reg_end); | |
1412 | ||
15a49a44 | 1413 | if (change == KVM_MR_FLAGS_ONLY) |
72f31048 | 1414 | goto out; |
15a49a44 | 1415 | |
849260c7 AB |
1416 | spin_lock(&kvm->mmu_lock); |
1417 | if (ret) | |
a0e50aa3 | 1418 | unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size); |
ada329e6 | 1419 | else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) |
849260c7 AB |
1420 | stage2_flush_memslot(kvm, memslot); |
1421 | spin_unlock(&kvm->mmu_lock); | |
72f31048 | 1422 | out: |
89154dd5 | 1423 | mmap_read_unlock(current->mm); |
8eef9123 | 1424 | return ret; |
df6ce24f EA |
1425 | } |
1426 | ||
e96c81ee | 1427 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) |
df6ce24f EA |
1428 | { |
1429 | } | |
1430 | ||
15248258 | 1431 | void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) |
df6ce24f EA |
1432 | { |
1433 | } | |
1434 | ||
1435 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | |
1436 | { | |
a0e50aa3 | 1437 | kvm_free_stage2_pgd(&kvm->arch.mmu); |
df6ce24f EA |
1438 | } |
1439 | ||
1440 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |
1441 | struct kvm_memory_slot *slot) | |
1442 | { | |
8eef9123 AB |
1443 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
1444 | phys_addr_t size = slot->npages << PAGE_SHIFT; | |
1445 | ||
1446 | spin_lock(&kvm->mmu_lock); | |
a0e50aa3 | 1447 | unmap_stage2_range(&kvm->arch.mmu, gpa, size); |
8eef9123 | 1448 | spin_unlock(&kvm->mmu_lock); |
df6ce24f | 1449 | } |
3c1e7165 MZ |
1450 | |
1451 | /* | |
1452 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | |
1453 | * | |
1454 | * Main problems: | |
1455 | * - S/W ops are local to a CPU (not broadcast) | |
1456 | * - We have line migration behind our back (speculation) | |
1457 | * - System caches don't support S/W at all (damn!) | |
1458 | * | |
1459 | * In the face of the above, the best we can do is to try and convert | |
1460 | * S/W ops to VA ops. Because the guest is not allowed to infer the | |
1461 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, | |
1462 | * which is a rather good thing for us. | |
1463 | * | |
1464 | * Also, it is only used when turning caches on/off ("The expected | |
1465 | * usage of the cache maintenance instructions that operate by set/way | |
1466 | * is associated with the cache maintenance instructions associated | |
1467 | * with the powerdown and powerup of caches, if this is required by | |
1468 | * the implementation."). | |
1469 | * | |
1470 | * We use the following policy: | |
1471 | * | |
1472 | * - If we trap a S/W operation, we enable VM trapping to detect | |
1473 | * caches being turned on/off, and do a full clean. | |
1474 | * | |
1475 | * - We flush the caches on both caches being turned on and off. | |
1476 | * | |
1477 | * - Once the caches are enabled, we stop trapping VM ops. | |
1478 | */ | |
1479 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) | |
1480 | { | |
3df59d8d | 1481 | unsigned long hcr = *vcpu_hcr(vcpu); |
3c1e7165 MZ |
1482 | |
1483 | /* | |
1484 | * If this is the first time we do a S/W operation | |
1485 | * (i.e. HCR_TVM not set) flush the whole memory, and set the | |
1486 | * VM trapping. | |
1487 | * | |
1488 | * Otherwise, rely on the VM trapping to wait for the MMU + | |
1489 | * Caches to be turned off. At that point, we'll be able to | |
1490 | * clean the caches again. | |
1491 | */ | |
1492 | if (!(hcr & HCR_TVM)) { | |
1493 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), | |
1494 | vcpu_has_cache_enabled(vcpu)); | |
1495 | stage2_flush_vm(vcpu->kvm); | |
3df59d8d | 1496 | *vcpu_hcr(vcpu) = hcr | HCR_TVM; |
3c1e7165 MZ |
1497 | } |
1498 | } | |
1499 | ||
1500 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) | |
1501 | { | |
1502 | bool now_enabled = vcpu_has_cache_enabled(vcpu); | |
1503 | ||
1504 | /* | |
1505 | * If switching the MMU+caches on, need to invalidate the caches. | |
1506 | * If switching it off, need to clean the caches. | |
1507 | * Clean + invalidate does the trick always. | |
1508 | */ | |
1509 | if (now_enabled != was_enabled) | |
1510 | stage2_flush_vm(vcpu->kvm); | |
1511 | ||
1512 | /* Caches are now on, stop trapping VM ops (until a S/W op) */ | |
1513 | if (now_enabled) | |
3df59d8d | 1514 | *vcpu_hcr(vcpu) &= ~HCR_TVM; |
3c1e7165 MZ |
1515 | |
1516 | trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); | |
1517 | } |