// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"

static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long __ro_after_init hyp_idmap_start;
static unsigned long __ro_after_init hyp_idmap_end;
static phys_addr_t __ro_after_init hyp_idmap_vector;

static unsigned long __ro_after_init io_map_base;

static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
					   phys_addr_t size)
{
	phys_addr_t boundary = ALIGN_DOWN(addr + size, size);

	return (boundary - 1 < end - 1) ? boundary : end;
}

static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);

	return __stage2_range_addr_end(addr, end, size);
}

/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
 * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock for
 * too long will also starve other vCPUs. We also have to make sure that the
 * page tables are not freed while the lock is dropped.
 */
static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr,
			      phys_addr_t end,
			      int (*fn)(struct kvm_pgtable *, u64, u64),
			      bool resched)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	int ret;
	u64 next;

	do {
		struct kvm_pgtable *pgt = mmu->pgt;
		if (!pgt)
			return -EINVAL;

		next = stage2_range_addr_end(addr, end);
		ret = fn(pgt, addr, next - addr);
		if (ret)
			break;

		if (resched && next != end)
			cond_resched_rwlock_write(&kvm->mmu_lock);
	} while (addr = next, addr != end);

	return ret;
}

#define stage2_apply_range_resched(mmu, addr, end, fn)			\
	stage2_apply_range(mmu, addr, end, fn, true)

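/*
 * Typical use of the helper above: callers pass one of the
 * kvm_pgtable_stage2_* walkers as @fn, e.g.
 *
 *	stage2_apply_range_resched(&kvm->arch.mmu, addr, end,
 *				   kvm_pgtable_stage2_flush);
 *
 * as done by stage2_flush_memslot() below, so each iteration covers at most
 * one KVM_PGTABLE_MIN_BLOCK_LEVEL-sized chunk before the rwlock may be
 * dropped.
 */
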
/*
 * Get the maximum number of page-tables pages needed to split a range
 * of blocks into PAGE_SIZE PTEs. It assumes the range is already
 * mapped at level 2, or at level 1 if allowed.
 */
static int kvm_mmu_split_nr_page_tables(u64 range)
{
	int n = 0;

	if (KVM_PGTABLE_MIN_BLOCK_LEVEL < 2)
		n += DIV_ROUND_UP(range, PUD_SIZE);
	n += DIV_ROUND_UP(range, PMD_SIZE);
	return n;
}

static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
{
	struct kvm_mmu_memory_cache *cache;
	u64 chunk_size, min;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
		return true;

	chunk_size = kvm->arch.mmu.split_page_chunk_size;
	min = kvm_mmu_split_nr_page_tables(chunk_size);
	cache = &kvm->arch.mmu.split_page_cache;
	return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
}

static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
				    phys_addr_t end)
{
	struct kvm_mmu_memory_cache *cache;
	struct kvm_pgtable *pgt;
	int ret, cache_capacity;
	u64 next, chunk_size;

	lockdep_assert_held_write(&kvm->mmu_lock);

	chunk_size = kvm->arch.mmu.split_page_chunk_size;
	cache_capacity = kvm_mmu_split_nr_page_tables(chunk_size);

	if (chunk_size == 0)
		return 0;

	cache = &kvm->arch.mmu.split_page_cache;

	do {
		if (need_split_memcache_topup_or_resched(kvm)) {
			write_unlock(&kvm->mmu_lock);
			cond_resched();
			/* Eager page splitting is best-effort. */
			ret = __kvm_mmu_topup_memory_cache(cache,
							   cache_capacity,
							   cache_capacity);
			write_lock(&kvm->mmu_lock);
			if (ret)
				break;
		}

		pgt = kvm->arch.mmu.pgt;
		if (!pgt)
			return -EINVAL;

		next = __stage2_range_addr_end(addr, end, chunk_size);
		ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
		if (ret)
			break;
	} while (addr = next, addr != end);

	return ret;
}

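/*
 * Worked example of the capacity arithmetic above, assuming a 4K granule
 * (PMD_SIZE = 2M, PUD_SIZE = 1G): splitting a 1G chunk needs
 * DIV_ROUND_UP(1G, 2M) = 512 PMD-level tables, plus one PUD-level table when
 * KVM_PGTABLE_MIN_BLOCK_LEVEL < 2, i.e. 513 pages in the worst case, which is
 * what the split cache is topped up to before each chunk is split.
 */
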
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
	return 0;
}

int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
				     gfn_t gfn, u64 nr_pages)
{
	kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
				 gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
	return 0;
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_is_map_memory(pfn);
}

static void *stage2_memcache_zalloc_page(void *arg)
{
	struct kvm_mmu_memory_cache *mc = arg;
	void *virt;

	/* Allocated with __GFP_ZERO, so no need to zero */
	virt = kvm_mmu_memory_cache_alloc(mc);
	if (virt)
		kvm_account_pgtable_pages(virt, 1);
	return virt;
}

static void *kvm_host_zalloc_pages_exact(size_t size)
{
	return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

static void *kvm_s2_zalloc_pages_exact(size_t size)
{
	void *virt = kvm_host_zalloc_pages_exact(size);

	if (virt)
		kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
	return virt;
}

static void kvm_s2_free_pages_exact(void *virt, size_t size)
{
	kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
	free_pages_exact(virt, size);
}

static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;

static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
{
	struct page *page = container_of(head, struct page, rcu_head);
	void *pgtable = page_to_virt(page);
	s8 level = page_private(page);

	kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
}

static void stage2_free_unlinked_table(void *addr, s8 level)
{
	struct page *page = virt_to_page(addr);

	set_page_private(page, (unsigned long)level);
	call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb);
}

static void kvm_host_get_page(void *addr)
{
	get_page(virt_to_page(addr));
}

static void kvm_host_put_page(void *addr)
{
	put_page(virt_to_page(addr));
}

static void kvm_s2_put_page(void *addr)
{
	struct page *p = virt_to_page(addr);
	/* Dropping last refcount, the page will be freed */
	if (page_count(p) == 1)
		kvm_account_pgtable_pages(addr, -1);
	put_page(p);
}

static int kvm_host_page_count(void *addr)
{
	return page_count(virt_to_page(addr));
}

static phys_addr_t kvm_host_pa(void *addr)
{
	return __pa(addr);
}

static void *kvm_host_va(phys_addr_t phys)
{
	return __va(phys);
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__clean_dcache_guest_page(va, size);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__invalidate_icache_guest_page(va, size);
}

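/*
 * The helpers above are not called directly by this file's logic; they are
 * gathered into the kvm_s2_mm_ops structure further down and handed to the
 * generic page-table walker code via kvm_pgtable_stage2_init(), so the
 * walker can allocate, account, reference-count and maintain the caches of
 * stage-2 table pages without knowing about the host's mm internals.
 */
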
/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we flush to make sure the IO subsystem will
 * never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
/**
 * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 * @may_block: Whether or not we are permitted to block
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
				 bool may_block)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	phys_addr_t end = start + size;

	lockdep_assert_held_write(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);
	WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap,
				   may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
	__unmap_stage2_range(mmu, start, size, true);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

	stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx, bkt;

	idx = srcu_read_lock(&kvm->srcu);
	write_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots)
		stage2_flush_memslot(kvm, memslot);

	write_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void __init free_hyp_pgds(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);
	if (hyp_pgtable) {
		kvm_pgtable_hyp_destroy(hyp_pgtable);
		kfree(hyp_pgtable);
		hyp_pgtable = NULL;
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static bool kvm_host_owns_hyp_mappings(void)
{
	if (is_kernel_in_hyp_mode())
		return false;

	if (static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	/*
	 * This can happen at boot time when __create_hyp_mappings() is called
	 * after the hyp protection has been enabled, but the static key has
	 * not been flipped yet.
	 */
	if (!hyp_pgtable && is_protected_kvm_enabled())
		return false;

	WARN_ON(!hyp_pgtable);

	return true;
}

int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	if (WARN_ON(!kvm_host_owns_hyp_mappings()))
		return -EINVAL;

	mutex_lock(&kvm_hyp_pgd_mutex);
	err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
	mutex_unlock(&kvm_hyp_pgd_mutex);

	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

struct hyp_shared_pfn {
	u64 pfn;
	int count;
	struct rb_node node;
};

static DEFINE_MUTEX(hyp_shared_pfns_lock);
static struct rb_root hyp_shared_pfns = RB_ROOT;

static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
					      struct rb_node **parent)
{
	struct hyp_shared_pfn *this;

	*node = &hyp_shared_pfns.rb_node;
	*parent = NULL;
	while (**node) {
		this = container_of(**node, struct hyp_shared_pfn, node);
		*parent = **node;
		if (this->pfn < pfn)
			*node = &((**node)->rb_left);
		else if (this->pfn > pfn)
			*node = &((**node)->rb_right);
		else
			return this;
	}

	return NULL;
}

static int share_pfn_hyp(u64 pfn)
{
	struct rb_node **node, *parent;
	struct hyp_shared_pfn *this;
	int ret = 0;

	mutex_lock(&hyp_shared_pfns_lock);
	this = find_shared_pfn(pfn, &node, &parent);
	if (this) {
		this->count++;
		goto unlock;
	}

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this) {
		ret = -ENOMEM;
		goto unlock;
	}

	this->pfn = pfn;
	this->count = 1;
	rb_link_node(&this->node, parent, node);
	rb_insert_color(&this->node, &hyp_shared_pfns);
	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
	mutex_unlock(&hyp_shared_pfns_lock);

	return ret;
}

static int unshare_pfn_hyp(u64 pfn)
{
	struct rb_node **node, *parent;
	struct hyp_shared_pfn *this;
	int ret = 0;

	mutex_lock(&hyp_shared_pfns_lock);
	this = find_shared_pfn(pfn, &node, &parent);
	if (WARN_ON(!this)) {
		ret = -ENOENT;
		goto unlock;
	}

	this->count--;
	if (this->count)
		goto unlock;

	rb_erase(&this->node, &hyp_shared_pfns);
	kfree(this);
	ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
unlock:
	mutex_unlock(&hyp_shared_pfns_lock);

	return ret;
}

int kvm_share_hyp(void *from, void *to)
{
	phys_addr_t start, end, cur;
	u64 pfn;
	int ret;

	if (is_kernel_in_hyp_mode())
		return 0;

	/*
	 * The share hcall maps things in the 'fixed-offset' region of the hyp
	 * VA space, so we can only share physically contiguous data-structures
	 * for now.
	 */
	if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
		return -EINVAL;

	if (kvm_host_owns_hyp_mappings())
		return create_hyp_mappings(from, to, PAGE_HYP);

	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
	end = PAGE_ALIGN(__pa(to));
	for (cur = start; cur < end; cur += PAGE_SIZE) {
		pfn = __phys_to_pfn(cur);
		ret = share_pfn_hyp(pfn);
		if (ret)
			return ret;
	}

	return 0;
}

void kvm_unshare_hyp(void *from, void *to)
{
	phys_addr_t start, end, cur;
	u64 pfn;

	if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
		return;

	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
	end = PAGE_ALIGN(__pa(to));
	for (cur = start; cur < end; cur += PAGE_SIZE) {
		pfn = __phys_to_pfn(cur);
		WARN_ON(unshare_pfn_hyp(pfn));
	}
}

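/*
 * Sharing is reference counted per pfn: a kvm_share_hyp() on a page that is
 * already in the hyp_shared_pfns tree only bumps ->count, and the
 * __pkvm_host_unshare_hyp hypercall is issued only when the last
 * kvm_unshare_hyp() drops the count to zero. For example, sharing the same
 * structure twice requires two unshares before the page is actually
 * unmapped from hyp.
 */
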
/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);

	if (is_kernel_in_hyp_mode())
		return 0;

	if (!kvm_host_owns_hyp_mappings())
		return -EPERM;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
					    prot);
		if (err)
			return err;
	}

	return 0;
}

static int __hyp_alloc_private_va_range(unsigned long base)
{
	lockdep_assert_held(&kvm_hyp_pgd_mutex);

	if (!PAGE_ALIGNED(base))
		return -EINVAL;

	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		return -ENOMEM;

	io_map_base = base;

	return 0;
}

/**
 * hyp_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated below io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long base;
	int ret = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check in
	 * __hyp_alloc_private_va_range() will kick in. A potential
	 * alternative would be to detect that overflow and switch
	 * to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
	size = PAGE_ALIGN(size);
	base = io_map_base - size;
	ret = __hyp_alloc_private_va_range(base);

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (!ret)
		*haddr = base;

	return ret;
}

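/*
 * Illustration of the allocation direction above (addresses purely made up
 * for the example): a request for 3 * PAGE_SIZE is first rounded with
 * PAGE_ALIGN(), then satisfied at base = io_map_base - size, after which
 * io_map_base is lowered to that base. Private hyp VA ranges therefore grow
 * downwards until the BIT(VA_BITS - 1) overflow check fires.
 */
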
static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
					unsigned long *haddr,
					enum kvm_pgtable_prot prot)
{
	unsigned long addr;
	int ret = 0;

	if (!kvm_host_owns_hyp_mappings()) {
		addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
					 phys_addr, size, prot);
		if (IS_ERR_VALUE(addr))
			return addr;
		*haddr = addr;

		return 0;
	}

	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	ret = hyp_alloc_private_va_range(size, &addr);
	if (ret)
		return ret;

	ret = __create_hyp_mappings(addr, size, phys_addr, prot);
	if (ret)
		return ret;

	*haddr = addr + offset_in_page(phys_addr);
	return ret;
}

int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
{
	unsigned long base;
	size_t size;
	int ret;

	mutex_lock(&kvm_hyp_pgd_mutex);
	/*
	 * Efficient stack verification using the PAGE_SHIFT bit implies
	 * an alignment of our allocation on the order of the size.
	 */
	size = PAGE_SIZE * 2;
	base = ALIGN_DOWN(io_map_base - size, size);

	ret = __hyp_alloc_private_va_range(base);

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (ret) {
		kvm_err("Cannot allocate hyp stack guard page\n");
		return ret;
	}

	/*
	 * Since the stack grows downwards, map the stack to the page
	 * at the higher address and leave the lower guard page
	 * unbacked.
	 *
	 * Any valid stack address now has the PAGE_SHIFT bit as 1
	 * and addresses corresponding to the guard page have the
	 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
	 */
	ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
				    PAGE_HYP);
	if (ret)
		kvm_err("Cannot map hyp stack\n");

	*haddr = base + size;

	return ret;
}

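/*
 * Example of the overflow check this layout enables (4K pages, so
 * PAGE_SHIFT = 12): the stack page and the unbacked guard page below it form
 * a naturally aligned 2 * PAGE_SIZE pair, so bit 12 of any valid stack
 * address is 1 while bit 12 of a guard-page address is 0; the hyp entry code
 * can therefore detect a stack overflow by testing a single bit of SP.
 */
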
/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr)
{
	unsigned long addr;
	int ret;

	if (is_protected_kvm_enabled())
		return -EPERM;

	*kaddr = ioremap(phys_addr, size);
	if (!*kaddr)
		return -ENOMEM;

	if (is_kernel_in_hyp_mode()) {
		*haddr = *kaddr;
		return 0;
	}

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_DEVICE);
	if (ret) {
		iounmap(*kaddr);
		*kaddr = NULL;
		*haddr = NULL;
		return ret;
	}

	*haddr = (void __iomem *)addr;
	return 0;
}

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr)
{
	unsigned long addr;
	int ret;

	BUG_ON(is_kernel_in_hyp_mode());

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_EXEC);
	if (ret) {
		*haddr = NULL;
		return ret;
	}

	*haddr = (void *)addr;
	return 0;
}

static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
	/* We shouldn't need any other callback to walk the PT */
	.phys_to_virt		= kvm_host_va,
};

static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
	struct kvm_pgtable pgt = {
		.pgd		= (kvm_pteref_t)kvm->mm->pgd,
		.ia_bits	= vabits_actual,
		.start_level	= (KVM_PGTABLE_LAST_LEVEL -
				   ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits) + 1),
		.mm_ops		= &kvm_user_mm_ops,
	};
	unsigned long flags;
	kvm_pte_t pte = 0;	/* Keep GCC quiet... */
	s8 level = S8_MAX;
	int ret;

	/*
	 * Disable IRQs so that we hazard against a concurrent
	 * teardown of the userspace page tables (which relies on
	 * IPI-ing threads).
	 */
	local_irq_save(flags);
	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
	local_irq_restore(flags);

	if (ret)
		return ret;

	/*
	 * Not seeing an error, but not updating level? Something went
	 * deeply wrong...
	 */
	if (WARN_ON(level > KVM_PGTABLE_LAST_LEVEL))
		return -EFAULT;
	if (WARN_ON(level < KVM_PGTABLE_FIRST_LEVEL))
		return -EFAULT;

	/* Oops, the userspace PTs are gone... Replay the fault */
	if (!kvm_pte_valid(pte))
		return -EAGAIN;

	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}

static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
	.zalloc_page		= stage2_memcache_zalloc_page,
	.zalloc_pages_exact	= kvm_s2_zalloc_pages_exact,
	.free_pages_exact	= kvm_s2_free_pages_exact,
	.free_unlinked_table	= stage2_free_unlinked_table,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_s2_put_page,
	.page_count		= kvm_host_page_count,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
	.dcache_clean_inval_poc	= clean_dcache_guest_page,
	.icache_inval_pou	= invalidate_icache_guest_page,
};

/**
 * kvm_init_stage2_mmu - Initialise an S2 MMU structure
 * @kvm:	The pointer to the KVM structure
 * @mmu:	The pointer to the s2 MMU structure
 * @type:	The machine type of the virtual machine
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
{
	u32 kvm_ipa_limit = get_kvm_ipa_limit();
	int cpu, err;
	struct kvm_pgtable *pgt;
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (is_protected_kvm_enabled()) {
		phys_shift = kvm_ipa_limit;
	} else if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < ARM64_MIN_PARANGE_BITS)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	if (mmu->pgt != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
	if (!pgt)
		return -ENOMEM;

	mmu->arch = &kvm->arch;
	err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
	if (err)
		goto out_free_pgtable;

	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
	if (!mmu->last_vcpu_ran) {
		err = -ENOMEM;
		goto out_destroy_pgtable;
	}

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

	/* The eager page splitting is disabled by default */
	mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
	mmu->split_page_cache.gfp_zero = __GFP_ZERO;

	mmu->pgt = pgt;
	mmu->pgd_phys = __pa(pgt->pgd);
	return 0;

out_destroy_pgtable:
	kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
	kfree(pgt);
	return err;
}

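/*
 * Illustration of the @type handling above: userspace selects the guest IPA
 * space via KVM_VM_TYPE_ARM_IPA_SIZE(bits) in the machine type, e.g. a VMM
 * asking for a 40-bit guest physical address space passes bits = 40; a value
 * of 0 falls back to KVM_PHYS_SHIFT, and protected VMs always get
 * kvm_ipa_limit regardless of what was requested.
 */
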
void kvm_uninit_stage2_mmu(struct kvm *kvm)
{
	kvm_free_stage2_pgd(&kvm->arch.mmu);
	kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
}

static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;
		hva_t vm_start, vm_end;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx, bkt;

	idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(current->mm);
	write_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots)
		stage2_unmap_memslot(kvm, memslot);

	write_unlock(&kvm->mmu_lock);
	mmap_read_unlock(current->mm);
	srcu_read_unlock(&kvm->srcu, idx);
}

void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	struct kvm_pgtable *pgt = NULL;

	write_lock(&kvm->mmu_lock);
	pgt = mmu->pgt;
	if (pgt) {
		mmu->pgd_phys = 0;
		mmu->pgt = NULL;
		free_percpu(mmu->last_vcpu_ran);
	}
	write_unlock(&kvm->mmu_lock);

	if (pgt) {
		kvm_pgtable_stage2_destroy(pgt);
		kfree(pgt);
	}
}

static void hyp_mc_free_fn(void *addr, void *unused)
{
	free_page((unsigned long)addr);
}

static void *hyp_mc_alloc_fn(void *unused)
{
	return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc)
{
	if (is_protected_kvm_enabled())
		__free_hyp_memcache(mc, hyp_mc_free_fn,
				    kvm_host_va, NULL);
}

int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
{
	if (!is_protected_kvm_enabled())
		return 0;

	return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
				    kvm_host_pa, NULL);
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:   Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr;
	int ret = 0;
	struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
	struct kvm_pgtable *pgt = mmu->pgt;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
				     KVM_PGTABLE_PROT_R |
				     (writable ? KVM_PGTABLE_PROT_W : 0);

	if (is_protected_kvm_enabled())
		return -EPERM;

	size += offset_in_page(guest_ipa);
	guest_ipa &= PAGE_MASK;

	for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
		ret = kvm_mmu_topup_memory_cache(&cache,
						 kvm_mmu_cache_min_pages(mmu));
		if (ret)
			break;

		write_lock(&kvm->mmu_lock);
		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
					     &cache, 0);
		write_unlock(&kvm->mmu_lock);
		if (ret)
			break;

		pa += PAGE_SIZE;
	}

	kvm_mmu_free_memory_cache(&cache);
	return ret;
}

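/*
 * Example of the rounding above: a request whose guest_ipa is not page
 * aligned is widened by offset_in_page(guest_ipa) and the IPA is masked down
 * to a page boundary, so the loop always installs whole PAGE_SIZE device
 * mappings, topping up and finally freeing the local memory cache as it goes.
 */
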
/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:	The KVM stage-2 MMU pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
	stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages after memory region
 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
 * all present PUD, PMD and PTEs are write protected in the memory region.
 * Afterwards read of dirty page log can be called.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start, end;

	if (WARN_ON_ONCE(!memslot))
		return;

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	write_lock(&kvm->mmu_lock);
	stage2_wp_range(&kvm->arch.mmu, start, end);
	write_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs_memslot(kvm, memslot);
}

/**
 * kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE
 *				   pages for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to split
 *
 * Acquires kvm->mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	phys_addr_t start, end;

	lockdep_assert_held(&kvm->slots_lock);

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, slot);

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	write_lock(&kvm->mmu_lock);
	kvm_mmu_split_huge_pages(kvm, start, end);
	write_unlock(&kvm->mmu_lock);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of pages at offset 'gfn_offset' in this memory
 *		slot to enable dirty logging on
 *
 * Write protects selected pages to enable dirty logging, and then
 * splits them to PAGE_SIZE. Caller must acquire kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	lockdep_assert_held_write(&kvm->mmu_lock);

	stage2_wp_range(&kvm->arch.mmu, start, end);

	/*
	 * Eager-splitting is done when manual-protect is set.  We
	 * also check for initially-all-set because we can avoid
	 * eager-splitting if initially-all-set is false.
	 * Initially-all-set equal false implies that huge-pages were
	 * already split when enabling dirty logging: no need to do it
	 * again.
	 */
	if (kvm_dirty_log_manual_protect_and_init_set(kvm))
		kvm_mmu_split_huge_pages(kvm, start, end);
}

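/*
 * Example of the mask arithmetic above: with mask = 0xff0, __ffs(mask) = 4
 * and __fls(mask) = 11, so the range that gets write-protected (and possibly
 * eagerly split) covers guest frames base_gfn + 4 through base_gfn + 11
 * inclusive, i.e. eight PAGE_SIZE pages.
 */
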
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva,
					       unsigned long map_size)
{
	gpa_t gpa_start;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
	if (map_size == PAGE_SIZE)
		return true;

	size = memslot->npages * PAGE_SIZE;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

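/*
 * Concrete case of the alignment check above, assuming map_size = PMD_SIZE
 * (2M with 4K pages): a memslot whose userspace address sits at offset
 * 0x1000 within a 2M region while its base IPA is 2M aligned fails the
 * (gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)) test, so
 * only PAGE_SIZE mappings are used and the d->f style mis-mapping shown in
 * the diagram cannot happen.
 */
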
/*
 * Check if the given hva is backed by a transparent huge page (THP) and
 * whether it can be mapped using block mapping in stage2. If so, adjust
 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
 * supported. This will need to be updated to support other THP sizes.
 *
 * Returns the size of the mapping.
 */
static long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long hva, kvm_pfn_t *pfnp,
			    phys_addr_t *ipap)
{
	kvm_pfn_t pfn = *pfnp;

	/*
	 * Make sure the adjustment is done only for THP pages. Also make
	 * sure that the HVA and IPA are sufficiently aligned and that the
	 * block map is contained within the memslot.
	 */
	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
		int sz = get_user_mapping_size(kvm, hva);

		if (sz < 0)
			return sz;

		if (sz < PMD_SIZE)
			return PAGE_SIZE;

		*ipap &= PMD_MASK;
		pfn &= ~(PTRS_PER_PMD - 1);
		*pfnp = pfn;

		return PMD_SIZE;
	}

	/* Use page mapping if we cannot use block mapping. */
	return PAGE_SIZE;
}

static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
{
	unsigned long pa;

	if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
		return huge_page_shift(hstate_vma(vma));

	if (!(vma->vm_flags & VM_PFNMAP))
		return PAGE_SHIFT;

	VM_BUG_ON(is_vm_hugetlb_page(vma));

	pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);

#ifndef __PAGETABLE_PMD_FOLDED
	if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PUD_SIZE) <= vma->vm_end)
		return PUD_SHIFT;
#endif

	if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PMD_SIZE) <= vma->vm_end)
		return PMD_SHIFT;

	return PAGE_SHIFT;
}

/*
 * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
 * able to see the page's tags and therefore they must be initialised first. If
 * PG_mte_tagged is set, tags have already been initialised.
 *
 * The race in the test/set of the PG_mte_tagged flag is handled by:
 * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
 *   racing to sanitise the same page
 * - mmap_lock protects between a VM faulting a page in and the VMM performing
 *   an mprotect() to add VM_MTE
 */
static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
			      unsigned long size)
{
	unsigned long i, nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(pfn);

	if (!kvm_has_mte(kvm))
		return;

	for (i = 0; i < nr_pages; i++, page++) {
		if (try_page_mte_tagging(page)) {
			mte_clear_page_tags(page_address(page));
			set_page_mte_tagged(page);
		}
	}
}

static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MTE_ALLOWED;
}

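/*
 * user_mem_abort() below is the slow path for stage-2 aborts on memslot
 * backed memory: it sizes the mapping from the VMA and memslot alignment,
 * resolves the pfn, handles MTE tag sanitising and dirty-log write
 * protection, and then installs or relaxes the stage-2 mapping accordingly.
 */
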
94f8e641 | 1377 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
98047888 | 1378 | struct kvm_memory_slot *memslot, unsigned long hva, |
11e5ea52 | 1379 | bool fault_is_perm) |
94f8e641 | 1380 | { |
ffd1b63a | 1381 | int ret = 0; |
6396b852 | 1382 | bool write_fault, writable, force_pte = false; |
8c2e8ac8 | 1383 | bool exec_fault, mte_allowed; |
8c47ce3e | 1384 | bool device = false, vfio_allow_any_uc = false; |
94f8e641 | 1385 | unsigned long mmu_seq; |
ad361f09 | 1386 | struct kvm *kvm = vcpu->kvm; |
94f8e641 | 1387 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
ad361f09 | 1388 | struct vm_area_struct *vma; |
1559b758 | 1389 | short vma_shift; |
6f745f1b | 1390 | gfn_t gfn; |
ba049e93 | 1391 | kvm_pfn_t pfn; |
15a49a44 | 1392 | bool logging_active = memslot_is_logging(memslot); |
e86fc1a3 | 1393 | long vma_pagesize, fault_granule; |
6f745f1b WD |
1394 | enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; |
1395 | struct kvm_pgtable *pgt; | |
94f8e641 | 1396 | |
11e5ea52 AB |
1397 | if (fault_is_perm) |
1398 | fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu); | |
a7d079ce | 1399 | write_fault = kvm_is_write_fault(vcpu); |
c4ad98e4 | 1400 | exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); |
d0e22b4a MZ |
1401 | VM_BUG_ON(write_fault && exec_fault); |
1402 | ||
11e5ea52 | 1403 | if (fault_is_perm && !write_fault && !exec_fault) { |
94f8e641 CD |
1404 | kvm_err("Unexpected L2 read permission error\n"); |
1405 | return -EFAULT; | |
1406 | } | |
1407 | ||
13ec9308 DM |
1408 | /* |
1409 | * Permission faults just need to update the existing leaf entry, | |
1410 | * and so normally don't require allocations from the memcache. The | |
1411 | * only exception to this is when dirty logging is enabled at runtime | |
1412 | * and a write fault needs to collapse a block entry into a table. | |
1413 | */ | |
11e5ea52 | 1414 | if (!fault_is_perm || (logging_active && write_fault)) { |
13ec9308 | 1415 | ret = kvm_mmu_topup_memory_cache(memcache, |
fe49fd94 | 1416 | kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu)); |
13ec9308 DM |
1417 | if (ret) |
1418 | return ret; | |
1419 | } | |
1420 | ||
2aa53d68 KZ |
1421 | /* |
1422 | * Let's check if we will get back a huge page backed by hugetlbfs, or | |
1423 | * get block mapping for device MMIO region. | |
1424 | */ | |
89154dd5 | 1425 | mmap_read_lock(current->mm); |
09eef83a | 1426 | vma = vma_lookup(current->mm, hva); |
37b54408 AB |
1427 | if (unlikely(!vma)) { |
1428 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); | |
89154dd5 | 1429 | mmap_read_unlock(current->mm); |
37b54408 AB |
1430 | return -EFAULT; |
1431 | } | |
1432 | ||
2aa53d68 KZ |
1433 | /* |
1434 | * logging_active is guaranteed to never be true for VM_PFNMAP | |
1435 | * memslots. | |
1436 | */ | |
1437 | if (logging_active) { | |
a80868f3 | 1438 | force_pte = true; |
523b3999 | 1439 | vma_shift = PAGE_SHIFT; |
2aa53d68 KZ |
1440 | } else { |
1441 | vma_shift = get_vma_page_shift(vma, hva); | |
523b3999 AE |
1442 | } |
1443 | ||
2f40c460 | 1444 | switch (vma_shift) { |
faf00039 | 1445 | #ifndef __PAGETABLE_PMD_FOLDED |
2f40c460 GS |
1446 | case PUD_SHIFT: |
1447 | if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) | |
1448 | break; | |
1449 | fallthrough; | |
faf00039 | 1450 | #endif |
2f40c460 GS |
1451 | case CONT_PMD_SHIFT: |
1452 | vma_shift = PMD_SHIFT; | |
1453 | fallthrough; | |
1454 | case PMD_SHIFT: | |
1455 | if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) | |
1456 | break; | |
1457 | fallthrough; | |
1458 | case CONT_PTE_SHIFT: | |
523b3999 | 1459 | vma_shift = PAGE_SHIFT; |
2f40c460 GS |
1460 | force_pte = true; |
1461 | fallthrough; | |
1462 | case PAGE_SHIFT: | |
1463 | break; | |
1464 | default: | |
1465 | WARN_ONCE(1, "Unknown vma_shift %d", vma_shift); | |
a80868f3 SP |
1466 | } |
1467 | ||
523b3999 | 1468 | vma_pagesize = 1UL << vma_shift; |
6f745f1b | 1469 | if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) |
523b3999 | 1470 | fault_ipa &= ~(vma_pagesize - 1); |
6f745f1b WD |
1471 | |
1472 | gfn = fault_ipa >> PAGE_SHIFT; | |
8c2e8ac8 MZ |
1473 | mte_allowed = kvm_vma_mte_allowed(vma); |
1474 | ||
8c47ce3e AA |
1475 | vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED; |
1476 | ||
8c2e8ac8 MZ |
1477 | /* Don't use the VMA after the unlock -- it may have vanished */ |
1478 | vma = NULL; | |
94f8e641 | 1479 | |
94f8e641 | 1480 | /* |
13ec9308 DM |
1481 | * Read mmu_invalidate_seq so that KVM can detect if the results of |
1482 | * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to | |
1483 | * acquiring kvm->mmu_lock. | |
10ba2d17 | 1484 | * |
13ec9308 DM |
1485 | * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs |
1486 | * with the smp_wmb() in kvm_mmu_invalidate_end(). | |
94f8e641 | 1487 | */ |
13ec9308 DM |
1488 | mmu_seq = vcpu->kvm->mmu_invalidate_seq; |
1489 | mmap_read_unlock(current->mm); | |
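The ordering here is the load-bearing part: mmu_invalidate_seq is sampled before the pfn lookup, and mmu_invalidate_retry() re-checks it under the mmu_lock so that a racing MMU-notifier invalidation forces the fault to be replayed instead of installing a stale mapping. A simplified, single-threaded model of that retry check (helper names invented for the sketch; the real code relies on lock-provided barriers):

#include <stdbool.h>
#include <stdio.h>

struct fake_mmu {
	unsigned long invalidate_seq;
	long invalidate_in_progress;
};

/* Model of mmu_invalidate_retry(): stale if an invalidation ran or is running. */
static bool fake_invalidate_retry(const struct fake_mmu *mmu, unsigned long seq)
{
	if (mmu->invalidate_in_progress)
		return true;
	return mmu->invalidate_seq != seq;
}

int main(void)
{
	struct fake_mmu mmu = { .invalidate_seq = 42, .invalidate_in_progress = 0 };

	unsigned long seq = mmu.invalidate_seq;	/* sample before the lookup */
	/* ... resolve hva -> pfn here ... */
	mmu.invalidate_seq++;			/* a notifier fired meanwhile */

	/* ... now under the mmu_lock ... */
	if (fake_invalidate_retry(&mmu, seq))
		printf("lookup went stale, replay the fault\n");
	return 0;
}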
94f8e641 | 1490 | |
c8b88b33 | 1491 | pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, |
10ba2d17 | 1492 | write_fault, &writable, NULL); |
196f878a | 1493 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
1559b758 | 1494 | kvm_send_hwpoison_signal(hva, vma_shift); |
196f878a JM |
1495 | return 0; |
1496 | } | |
9ac71595 | 1497 | if (is_error_noslot_pfn(pfn)) |
94f8e641 CD |
1498 | return -EFAULT; |
1499 | ||
15a49a44 | 1500 | if (kvm_is_device_pfn(pfn)) { |
2aa53d68 KZ |
1501 | /* |
1502 | * If the page was identified as device early by looking at | |
1503 | * the VMA flags, vma_pagesize is already representing the | |
1504 | * largest quantity we can map. If instead it was mapped | |
1505 | * via __gfn_to_pfn_memslot(), vma_pagesize is set to PAGE_SIZE | |
1506 | * and must not be upgraded. | |
1507 | * | |
1508 | * In both cases, we don't let transparent_hugepage_adjust() | |
1509 | * change things at the last minute. | |
1510 | */ | |
6f745f1b WD |
1511 | device = true; |
1512 | } else if (logging_active && !write_fault) { | |
15a49a44 MS |
1513 | /* |
1514 | * Only actually map the page as writable if this was a write | |
1515 | * fault. | |
1516 | */ | |
6f745f1b | 1517 | writable = false; |
15a49a44 | 1518 | } |
b8865767 | 1519 | |
6f745f1b | 1520 | if (exec_fault && device) |
6d674e28 MZ |
1521 | return -ENOEXEC; |
1522 | ||
1577cb58 | 1523 | read_lock(&kvm->mmu_lock); |
6f745f1b | 1524 | pgt = vcpu->arch.hw_mmu->pgt; |
20ec3ebd | 1525 | if (mmu_invalidate_retry(kvm, mmu_seq)) |
94f8e641 | 1526 | goto out_unlock; |
15a49a44 | 1527 | |
0529c902 SP |
1528 | /* |
1529 | * If we are not forced to use page mapping, check if we are | |
1530 | * backed by a THP and thus use block mapping if possible. | |
1531 | */ | |
f2cc3273 | 1532 | if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) { |
11e5ea52 | 1533 | if (fault_is_perm && fault_granule > PAGE_SIZE) |
f2cc3273 MZ |
1534 | vma_pagesize = fault_granule; |
1535 | else | |
1536 | vma_pagesize = transparent_hugepage_adjust(kvm, memslot, | |
1537 | hva, &pfn, | |
1538 | &fault_ipa); | |
e86fc1a3 MZ |
1539 | |
1540 | if (vma_pagesize < 0) { | |
1541 | ret = vma_pagesize; | |
1542 | goto out_unlock; | |
1543 | } | |
f2cc3273 | 1544 | } |
ad361f09 | 1545 | |
11e5ea52 | 1546 | if (!fault_is_perm && !device && kvm_has_mte(kvm)) { |
d89585fb | 1547 | /* Check the VMM hasn't introduced a new disallowed VMA */ |
8c2e8ac8 | 1548 | if (mte_allowed) { |
2dbf12ae CM |
1549 | sanitise_mte_tags(kvm, pfn, vma_pagesize); |
1550 | } else { | |
ea7fc1bb | 1551 | ret = -EFAULT; |
ea7fc1bb | 1552 | goto out_unlock; |
2dbf12ae | 1553 | } |
ea7fc1bb | 1554 | } |
3f58bf63 | 1555 | |
509552e6 | 1556 | if (writable) |
6f745f1b | 1557 | prot |= KVM_PGTABLE_PROT_W; |
ad361f09 | 1558 | |
25aa2869 | 1559 | if (exec_fault) |
6f745f1b | 1560 | prot |= KVM_PGTABLE_PROT_X; |
3f58bf63 | 1561 | |
8c47ce3e AA |
1562 | if (device) { |
1563 | if (vfio_allow_any_uc) | |
1564 | prot |= KVM_PGTABLE_PROT_NORMAL_NC; | |
1565 | else | |
1566 | prot |= KVM_PGTABLE_PROT_DEVICE; | |
1567 | } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) { | |
6f745f1b | 1568 | prot |= KVM_PGTABLE_PROT_X; |
8c47ce3e | 1569 | } |
a15f6939 | 1570 | |
7d894834 YW |
1571 | /* |
1572 | * Under the premise of getting a FSC_PERM fault, we just need to relax | |
1573 | * permissions only if vma_pagesize equals fault_granule. Otherwise, | |
1574 | * kvm_pgtable_stage2_map() should be called to change block size. | |
1575 | */ | |
11e5ea52 | 1576 | if (fault_is_perm && vma_pagesize == fault_granule) |
6f745f1b | 1577 | ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); |
1577cb58 | 1578 | else |
6f745f1b WD |
1579 | ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, |
1580 | __pfn_to_phys(pfn), prot, | |
ddcadb29 OU |
1581 | memcache, |
1582 | KVM_PGTABLE_WALK_HANDLE_FAULT | | |
1583 | KVM_PGTABLE_WALK_SHARED); | |
ad361f09 | 1584 | |
509552e6 YW |
1585 | /* Mark the page dirty only if the fault is handled successfully */ |
1586 | if (writable && !ret) { | |
1587 | kvm_set_pfn_dirty(pfn); | |
10ba2d17 | 1588 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
509552e6 YW |
1589 | } |
1590 | ||
94f8e641 | 1591 | out_unlock: |
1577cb58 | 1592 | read_unlock(&kvm->mmu_lock); |
94f8e641 | 1593 | kvm_release_pfn_clean(pfn); |
509552e6 | 1594 | return ret != -EAGAIN ? ret : 0; |
94f8e641 CD |
1595 | } |
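Pulled out of the tail of user_mem_abort(), the choice between the two pgtable calls is small enough to show on its own: a pure permission fault whose existing mapping already has the final granule only needs its permissions relaxed, while everything else (a first-time fault, or a mapping that must change size) goes through a full stage-2 map. The enum and helper below are invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

enum s2_action { S2_RELAX_PERMS, S2_MAP };

static enum s2_action pick_action(bool fault_is_perm,
				  unsigned long vma_pagesize,
				  unsigned long fault_granule)
{
	/* Relaxing permissions is enough only if the mapping keeps its size. */
	if (fault_is_perm && vma_pagesize == fault_granule)
		return S2_RELAX_PERMS;
	/* Otherwise install (or reinstall) the mapping at the new size. */
	return S2_MAP;
}

int main(void)
{
	printf("%d\n", pick_action(true, 2UL << 20, 2UL << 20));	/* 0: relax */
	printf("%d\n", pick_action(true, 2UL << 20, 4096));		/* 1: remap */
	return 0;
}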
1596 | ||
ee8efad7 | 1597 | /* Resolve the access fault by making the page young again. */ |
aeda9130 MZ |
1598 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) |
1599 | { | |
9a7ad19a | 1600 | kvm_pte_t pte; |
ee8efad7 | 1601 | struct kvm_s2_mmu *mmu; |
aeda9130 MZ |
1602 | |
1603 | trace_kvm_access_fault(fault_ipa); | |
1604 | ||
fc61f554 | 1605 | read_lock(&vcpu->kvm->mmu_lock); |
ee8efad7 | 1606 | mmu = vcpu->arch.hw_mmu; |
9a7ad19a | 1607 | pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa); |
fc61f554 | 1608 | read_unlock(&vcpu->kvm->mmu_lock); |
ee8efad7 | 1609 | |
9a7ad19a OU |
1610 | if (kvm_pte_valid(pte)) |
1611 | kvm_set_pfn_accessed(kvm_pte_to_pfn(pte)); | |
aeda9130 MZ |
1612 | } |
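Resolving an access fault amounts to setting the Access Flag on the already-present stage-2 descriptor and reporting the backing pfn as accessed. A toy model of the bit manipulation, assuming the usual VMSA layout where AF is bit 10 of a block/page descriptor (helper names invented for the sketch):

#include <stdint.h>
#include <stdio.h>

#define S2_DESC_VALID	(1ULL << 0)
#define S2_DESC_AF	(1ULL << 10)	/* Access Flag in the assumed layout */

static uint64_t s2_mkyoung(uint64_t desc)
{
	if (desc & S2_DESC_VALID)
		desc |= S2_DESC_AF;
	return desc;
}

int main(void)
{
	uint64_t old = S2_DESC_VALID;	/* valid entry, AF still clear */
	uint64_t new = s2_mkyoung(old);

	printf("old=%#llx new=%#llx young=%d\n",
	       (unsigned long long)old, (unsigned long long)new,
	       !!(new & S2_DESC_AF));
	return 0;
}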
1613 | ||
94f8e641 CD |
1614 | /** |
1615 | * kvm_handle_guest_abort - handles all 2nd stage aborts | |
1616 | * @vcpu: the VCPU pointer | |
94f8e641 CD |
1617 | * |
1618 | * Any abort that gets to the host is almost guaranteed to be caused by a | |
1619 | * missing second stage translation table entry, which can mean either that the | |
1620 | * guest simply needs more memory (and we must allocate an appropriate page), or | |
1621 | * that the guest tried to access I/O memory, which is emulated by user | |
1622 | * space. The distinction is based on the IPA causing the fault and whether this | |
1623 | * memory region has been registered as standard RAM by user space. | |
1624 | */ | |
74cc7e0c | 1625 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) |
342cd0ab | 1626 | { |
11e5ea52 | 1627 | unsigned long esr; |
94f8e641 CD |
1628 | phys_addr_t fault_ipa; |
1629 | struct kvm_memory_slot *memslot; | |
98047888 CD |
1630 | unsigned long hva; |
1631 | bool is_iabt, write_fault, writable; | |
94f8e641 CD |
1632 | gfn_t gfn; |
1633 | int ret, idx; | |
1634 | ||
11e5ea52 | 1635 | esr = kvm_vcpu_get_esr(vcpu); |
621f48e4 TB |
1636 | |
1637 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); | |
bb428921 | 1638 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
621f48e4 | 1639 | |
f5fe0ade | 1640 | if (esr_fsc_is_translation_fault(esr)) { |
85ea6b1e MZ |
1641 | /* Beyond sanitised PARange (which is the IPA limit) */ |
1642 | if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) { | |
1643 | kvm_inject_size_fault(vcpu); | |
1644 | return 1; | |
1645 | } | |
1646 | ||
1647 | /* Falls between the IPA range and the PARange? */ | |
1648 | if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) { | |
1649 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); | |
1650 | ||
1651 | if (is_iabt) | |
1652 | kvm_inject_pabt(vcpu, fault_ipa); | |
1653 | else | |
1654 | kvm_inject_dabt(vcpu, fault_ipa); | |
1655 | return 1; | |
1656 | } | |
1657 | } | |
1658 | ||
bb428921 | 1659 | /* Synchronous External Abort? */ |
c9a636f2 | 1660 | if (kvm_vcpu_abt_issea(vcpu)) { |
bb428921 JM |
1661 | /* |
1662 | * For RAS the host kernel may handle this abort. | |
1663 | * There is no need to pass the error into the guest. | |
1664 | */ | |
84b951a8 | 1665 | if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu))) |
bb428921 | 1666 | kvm_inject_vabt(vcpu); |
84b951a8 WD |
1667 | |
1668 | return 1; | |
4055710b MZ |
1669 | } |
1670 | ||
3a949f4c | 1671 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu), |
7393b599 | 1672 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
94f8e641 CD |
1673 | |
1674 | /* Check that the stage-2 fault is a translation, permission or access flag fault */ | |
11e5ea52 AB |
1675 | if (!esr_fsc_is_translation_fault(esr) && |
1676 | !esr_fsc_is_permission_fault(esr) && | |
1677 | !esr_fsc_is_access_flag_fault(esr)) { | |
0496daa5 CD |
1678 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
1679 | kvm_vcpu_trap_get_class(vcpu), | |
1680 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | |
3a949f4c | 1681 | (unsigned long)kvm_vcpu_get_esr(vcpu)); |
94f8e641 CD |
1682 | return -EFAULT; |
1683 | } | |
1684 | ||
1685 | idx = srcu_read_lock(&vcpu->kvm->srcu); | |
1686 | ||
1687 | gfn = fault_ipa >> PAGE_SHIFT; | |
98047888 CD |
1688 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
1689 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); | |
a7d079ce | 1690 | write_fault = kvm_is_write_fault(vcpu); |
98047888 | 1691 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
022c8328 WD |
1692 | /* |
1693 | * The guest has put either its instructions or its page-tables | |
1694 | * somewhere it shouldn't have. Userspace won't be able to do | |
1695 | * anything about this (there's no syndrome for a start), so | |
1696 | * re-inject the abort back into the guest. | |
1697 | */ | |
94f8e641 | 1698 | if (is_iabt) { |
6d674e28 MZ |
1699 | ret = -ENOEXEC; |
1700 | goto out; | |
94f8e641 CD |
1701 | } |
1702 | ||
c4ad98e4 | 1703 | if (kvm_vcpu_abt_iss1tw(vcpu)) { |
022c8328 WD |
1704 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
1705 | ret = 1; | |
1706 | goto out_unlock; | |
1707 | } | |
1708 | ||
57c841f1 MZ |
1709 | /* |
1710 | * Check for a cache maintenance operation. Since we | |
1711 | * ended-up here, we know it is outside of any memory | |
1712 | * slot. But we can't find out if that is for a device, | |
1713 | * or if the guest is just being stupid. The only thing | |
1714 | * we know for sure is that this range cannot be cached. | |
1715 | * | |
1716 | * So let's assume that the guest is just being | |
1717 | * cautious, and skip the instruction. | |
1718 | */ | |
54dc0d24 | 1719 | if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) { |
cdb5e02e | 1720 | kvm_incr_pc(vcpu); |
57c841f1 MZ |
1721 | ret = 1; |
1722 | goto out_unlock; | |
1723 | } | |
1724 | ||
cfe3950c MZ |
1725 | /* |
1726 | * The IPA is reported as [MAX:12], so we need to | |
1727 | * complement it with the bottom 12 bits from the | |
1728 | * faulting VA. This is always 12 bits, irrespective | |
1729 | * of the page size. | |
1730 | */ | |
1731 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | |
74cc7e0c | 1732 | ret = io_mem_abort(vcpu, fault_ipa); |
94f8e641 CD |
1733 | goto out_unlock; |
1734 | } | |
1735 | ||
c3058d5d | 1736 | /* Userspace should not be able to register out-of-bounds IPAs */ |
fe49fd94 | 1737 | VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu)); |
c3058d5d | 1738 | |
11e5ea52 | 1739 | if (esr_fsc_is_access_flag_fault(esr)) { |
aeda9130 MZ |
1740 | handle_access_fault(vcpu, fault_ipa); |
1741 | ret = 1; | |
1742 | goto out_unlock; | |
1743 | } | |
1744 | ||
11e5ea52 AB |
1745 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, |
1746 | esr_fsc_is_permission_fault(esr)); | |
94f8e641 CD |
1747 | if (ret == 0) |
1748 | ret = 1; | |
6d674e28 MZ |
1749 | out: |
1750 | if (ret == -ENOEXEC) { | |
1751 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | |
1752 | ret = 1; | |
1753 | } | |
94f8e641 CD |
1754 | out_unlock: |
1755 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
1756 | return ret; | |
342cd0ab CD |
1757 | } |
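As the comment in the MMIO path above notes, the IPA reported by the hardware is page-aligned (bits [MAX:12]), so the page-offset bits have to be taken from the faulting virtual address before handing the access to io_mem_abort(). A tiny sketch with hard-coded example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reported_ipa = 0x8f000ULL << 12;	/* page-aligned IPA from the fault syndrome */
	uint64_t faulting_va  = 0xffff0000deadbeefULL;	/* virtual address that faulted */
	uint64_t fault_ipa    = reported_ipa | (faulting_va & ((1ULL << 12) - 1));

	/* Prints 0x8f000eef: page frame from the IPA, offset from the VA */
	printf("fault_ipa=%#llx\n", (unsigned long long)fault_ipa);
	return 0;
}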
1758 | ||
cd4c7183 | 1759 | bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) |
d5d8184d | 1760 | { |
cd4c7183 | 1761 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1762 | return false; |
d5d8184d | 1763 | |
cd4c7183 SC |
1764 | __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT, |
1765 | (range->end - range->start) << PAGE_SHIFT, | |
1766 | range->may_block); | |
b5331379 | 1767 | |
fcb82839 | 1768 | return false; |
d5d8184d CD |
1769 | } |
1770 | ||
cd4c7183 | 1771 | bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
d5d8184d | 1772 | { |
3e1efe2b | 1773 | kvm_pfn_t pfn = pte_pfn(range->arg.pte); |
cd4c7183 | 1774 | |
063deeb1 | 1775 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1776 | return false; |
d5d8184d | 1777 | |
cd4c7183 | 1778 | WARN_ON(range->end - range->start != 1); |
d5d8184d | 1779 | |
2dbf12ae CM |
1780 | /* |
1781 | * If the page isn't tagged, defer to user_mem_abort() for sanitising | |
1782 | * the MTE tags. The S2 pte should have been unmapped by | |
1783 | * mmu_notifier_invalidate_range_end(). | |
1784 | */ | |
1785 | if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn))) | |
ea7fc1bb SP |
1786 | return false; |
1787 | ||
cd4c7183 | 1788 | /* |
25aa2869 YW |
1789 | * We've moved a page around, probably through CoW, so let's treat |
1790 | * it just like a translation fault and the map handler will clean | |
1791 | * the cache to the PoC. | |
1792 | * | |
e9edb17a | 1793 | * The MMU notifiers will have unmapped a huge PMD before calling |
cd4c7183 | 1794 | * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and |
e9edb17a WD |
1795 | * therefore we never need to clear out a huge PMD through this |
1796 | * calling path and a memcache is not required. | |
15a49a44 | 1797 | */ |
cd4c7183 SC |
1798 | kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, |
1799 | PAGE_SIZE, __pfn_to_phys(pfn), | |
1577cb58 | 1800 | KVM_PGTABLE_PROT_R, NULL, 0); |
cd4c7183 | 1801 | |
fcb82839 | 1802 | return false; |
d5d8184d CD |
1803 | } |
1804 | ||
cd4c7183 | 1805 | bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
d5d8184d | 1806 | { |
cd4c7183 | 1807 | u64 size = (range->end - range->start) << PAGE_SHIFT; |
d5d8184d | 1808 | |
e9edb17a | 1809 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1810 | return false; |
d5d8184d | 1811 | |
df6556ad OU |
1812 | return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt, |
1813 | range->start << PAGE_SHIFT, | |
1814 | size, true); | |
35307b9a MZ |
1815 | } |
1816 | ||
cd4c7183 | 1817 | bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
35307b9a | 1818 | { |
df6556ad OU |
1819 | u64 size = (range->end - range->start) << PAGE_SHIFT; |
1820 | ||
063deeb1 | 1821 | if (!kvm->arch.mmu.pgt) |
fcb82839 | 1822 | return false; |
501b9185 | 1823 | |
df6556ad OU |
1824 | return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt, |
1825 | range->start << PAGE_SHIFT, | |
1826 | size, false); | |
35307b9a MZ |
1827 | } |
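All of the gfn_range handlers above perform the same conversion from a gfn range to a byte address and size before touching the stage-2 tables; a trivial arithmetic sketch, assuming 4KiB pages:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assumed 4KiB pages */

int main(void)
{
	uint64_t start_gfn = 0x100, end_gfn = 0x180;	/* arbitrary range */
	uint64_t addr = start_gfn << EXAMPLE_PAGE_SHIFT;
	uint64_t size = (end_gfn - start_gfn) << EXAMPLE_PAGE_SHIFT;

	/* Prints addr=0x100000 size=0x80000 (512 KiB) */
	printf("addr=%#llx size=%#llx (%llu KiB)\n",
	       (unsigned long long)addr, (unsigned long long)size,
	       (unsigned long long)(size >> 10));
	return 0;
}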
1828 | ||
342cd0ab CD |
1829 | phys_addr_t kvm_mmu_get_httbr(void) |
1830 | { | |
0f9d09b8 | 1831 | return __pa(hyp_pgtable->pgd); |
342cd0ab CD |
1832 | } |
1833 | ||
5a677ce0 MZ |
1834 | phys_addr_t kvm_get_idmap_vector(void) |
1835 | { | |
1836 | return hyp_idmap_vector; | |
1837 | } | |
1838 | ||
0f9d09b8 | 1839 | static int kvm_map_idmap_text(void) |
0535a3e2 | 1840 | { |
0f9d09b8 WD |
1841 | unsigned long size = hyp_idmap_end - hyp_idmap_start; |
1842 | int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start, | |
1843 | PAGE_HYP_EXEC); | |
0535a3e2 MZ |
1844 | if (err) |
1845 | kvm_err("Failed to idmap %lx-%lx\n", | |
1846 | hyp_idmap_start, hyp_idmap_end); | |
1847 | ||
1848 | return err; | |
1849 | } | |
1850 | ||
7aef0cbc QP |
1851 | static void *kvm_hyp_zalloc_page(void *arg) |
1852 | { | |
1853 | return (void *)get_zeroed_page(GFP_KERNEL); | |
1854 | } | |
1855 | ||
1856 | static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { | |
1857 | .zalloc_page = kvm_hyp_zalloc_page, | |
1858 | .get_page = kvm_host_get_page, | |
1859 | .put_page = kvm_host_put_page, | |
1860 | .phys_to_virt = kvm_host_va, | |
1861 | .virt_to_phys = kvm_host_pa, | |
1862 | }; | |
1863 | ||
8d20bd63 | 1864 | int __init kvm_mmu_init(u32 *hyp_va_bits) |
342cd0ab | 1865 | { |
2fb41059 | 1866 | int err; |
579d7ebe RR |
1867 | u32 idmap_bits; |
1868 | u32 kernel_bits; | |
2fb41059 | 1869 | |
0a78791c | 1870 | hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); |
46fef158 | 1871 | hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); |
0a78791c | 1872 | hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end); |
46fef158 | 1873 | hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE); |
0a78791c | 1874 | hyp_idmap_vector = __pa_symbol(__kvm_hyp_init); |
5a677ce0 | 1875 | |
06f75a1f AB |
1876 | /* |
1877 | * We rely on the linker script to ensure at build time that the HYP | |
1878 | * init code does not cross a page boundary. | |
1879 | */ | |
1880 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); | |
5a677ce0 | 1881 | |
579d7ebe | 1882 | /* |
e6128a8e AB |
1883 | * The ID map is always configured for 48 bits of translation, which |
1884 | * may be fewer than the number of VA bits used by the regular kernel | |
1885 | * stage 1, when VA_BITS=52. | |
579d7ebe RR |
1886 | * |
1887 | * At EL2, there is only one TTBR register, and we can't switch between | |
1888 | * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom | |
1889 | * line: we need to use the extended range with *both* our translation | |
1890 | * tables. | |
1891 | * | |
1892 | * So use the maximum of the idmap VA bits and the regular kernel stage | |
1893 | * 1 VA bits to assure that the hypervisor can both ID map its code page | |
1894 | * and map any kernel memory. | |
1895 | */ | |
e6128a8e | 1896 | idmap_bits = IDMAP_VA_BITS; |
579d7ebe RR |
1897 | kernel_bits = vabits_actual; |
1898 | *hyp_va_bits = max(idmap_bits, kernel_bits); | |
1899 | ||
bfa79a80 | 1900 | kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits); |
b4ef0499 MZ |
1901 | kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); |
1902 | kvm_debug("HYP VA range: %lx:%lx\n", | |
1903 | kern_hyp_va(PAGE_OFFSET), | |
1904 | kern_hyp_va((unsigned long)high_memory - 1)); | |
eac378a9 | 1905 | |
6c41a413 | 1906 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && |
ed57cac8 | 1907 | hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) && |
d2896d4b | 1908 | hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) { |
eac378a9 MZ |
1909 | /* |
1910 | * The idmap page is intersecting with the VA space, | |
1911 | * it is not safe to continue further. | |
1912 | */ | |
1913 | kvm_err("IDMAP intersecting with HYP VA, unable to continue\n"); | |
1914 | err = -EINVAL; | |
1915 | goto out; | |
1916 | } | |
1917 | ||
0f9d09b8 WD |
1918 | hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL); |
1919 | if (!hyp_pgtable) { | |
1920 | kvm_err("Hyp mode page-table not allocated\n"); | |
2fb41059 MZ |
1921 | err = -ENOMEM; |
1922 | goto out; | |
1923 | } | |
1924 | ||
bfa79a80 | 1925 | err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops); |
0f9d09b8 WD |
1926 | if (err) |
1927 | goto out_free_pgtable; | |
d5d8184d | 1928 | |
0f9d09b8 WD |
1929 | err = kvm_map_idmap_text(); |
1930 | if (err) | |
1931 | goto out_destroy_pgtable; | |
5a677ce0 | 1932 | |
e3f019b3 | 1933 | io_map_base = hyp_idmap_start; |
d5d8184d | 1934 | return 0; |
0f9d09b8 WD |
1935 | |
1936 | out_destroy_pgtable: | |
1937 | kvm_pgtable_hyp_destroy(hyp_pgtable); | |
1938 | out_free_pgtable: | |
1939 | kfree(hyp_pgtable); | |
1940 | hyp_pgtable = NULL; | |
2fb41059 | 1941 | out: |
2fb41059 | 1942 | return err; |
342cd0ab | 1943 | } |
df6ce24f EA |
1944 | |
1945 | void kvm_arch_commit_memory_region(struct kvm *kvm, | |
9d4c197c | 1946 | struct kvm_memory_slot *old, |
f36f3f28 | 1947 | const struct kvm_memory_slot *new, |
df6ce24f EA |
1948 | enum kvm_mr_change change) |
1949 | { | |
6bd92b9d RK |
1950 | bool log_dirty_pages = new && new->flags & KVM_MEM_LOG_DIRTY_PAGES; |
1951 | ||
c6473555 MS |
1952 | /* |
1953 | * At this point memslot has been committed and there is an | |
656012c7 | 1954 | * allocated dirty_bitmap[], dirty pages will be tracked while the |
c6473555 MS |
1955 | * memory slot is write protected. |
1956 | */ | |
6bd92b9d RK |
1957 | if (log_dirty_pages) { |
1958 | ||
1959 | if (change == KVM_MR_DELETE) | |
1960 | return; | |
1961 | ||
c862626e | 1962 | /* |
e7bf7a49 RK |
1963 | * Huge and normal pages are write-protected and split |
1964 | * on either of these two cases: | |
6bd92b9d RK |
1965 | * |
1966 | * 1. with initial-all-set: gradually with CLEAR ioctls, | |
c862626e | 1967 | */ |
6bd92b9d RK |
1968 | if (kvm_dirty_log_manual_protect_and_init_set(kvm)) |
1969 | return; | |
1970 | /* | |
1971 | * or | |
1972 | * 2. without initial-all-set: all in one shot when | |
1973 | * enabling dirty logging. | |
1974 | */ | |
1975 | kvm_mmu_wp_memory_region(kvm, new->id); | |
e7bf7a49 RK |
1976 | kvm_mmu_split_memory_region(kvm, new->id); |
1977 | } else { | |
1978 | /* | |
1979 | * Free any leftovers from the eager page splitting cache. Do | |
1980 | * this when deleting, moving, disabling dirty logging, or | |
1981 | * creating the memslot (a nop). Doing it for deletes makes | |
1982 | * sure we don't leak memory, and there's no need to keep the | |
1983 | * cache around for any of the other cases. | |
1984 | */ | |
1985 | kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache); | |
c862626e | 1986 | } |
df6ce24f EA |
1987 | } |
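The branches above reduce to a small policy decision: no action unless dirty logging is on and the slot survives, lazy write-protection via the CLEAR ioctls when initial-all-set is in use, and eager write-protection plus block splitting otherwise. A compressed model of that decision, with names invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

enum wp_policy { WP_NONE, WP_DEFER_TO_CLEAR, WP_NOW_AND_SPLIT };

static enum wp_policy commit_policy(bool log_dirty_pages, bool deleting,
				    bool manual_protect_and_init_set)
{
	if (!log_dirty_pages || deleting)
		return WP_NONE;
	/* initial-all-set: write-protection happens gradually via CLEAR ioctls */
	if (manual_protect_and_init_set)
		return WP_DEFER_TO_CLEAR;
	/* otherwise write-protect and eagerly split the whole slot now */
	return WP_NOW_AND_SPLIT;
}

int main(void)
{
	printf("%d %d %d\n",
	       commit_policy(false, false, false),	/* 0 */
	       commit_policy(true, false, true),	/* 1 */
	       commit_policy(true, false, false));	/* 2 */
	return 0;
}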
1988 | ||
1989 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | |
537a17b3 SC |
1990 | const struct kvm_memory_slot *old, |
1991 | struct kvm_memory_slot *new, | |
df6ce24f EA |
1992 | enum kvm_mr_change change) |
1993 | { | |
509c594c | 1994 | hva_t hva, reg_end; |
8eef9123 AB |
1995 | int ret = 0; |
1996 | ||
15a49a44 MS |
1997 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && |
1998 | change != KVM_MR_FLAGS_ONLY) | |
8eef9123 AB |
1999 | return 0; |
2000 | ||
c3058d5d CD |
2001 | /* |
2002 | * Prevent userspace from creating a memory region outside of the IPA | |
2003 | * space addressable by the KVM guest IPA space. | |
2004 | */ | |
fe49fd94 | 2005 | if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT)) |
c3058d5d CD |
2006 | return -EFAULT; |
2007 | ||
509c594c SC |
2008 | hva = new->userspace_addr; |
2009 | reg_end = hva + (new->npages << PAGE_SHIFT); | |
2010 | ||
89154dd5 | 2011 | mmap_read_lock(current->mm); |
8eef9123 AB |
2012 | /* |
2013 | * A memory region could potentially cover multiple VMAs, and any holes | |
fd6f17ba | 2014 | * between them, so iterate over all of them. |
8eef9123 AB |
2015 | * |
2016 | * +--------------------------------------------+ | |
2017 | * +---------------+----------------+ +----------------+ | |
2018 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
2019 | * +---------------+----------------+ +----------------+ | |
2020 | * | memory region | | |
2021 | * +--------------------------------------------+ | |
2022 | */ | |
2023 | do { | |
c728fd4c | 2024 | struct vm_area_struct *vma; |
8eef9123 | 2025 | |
c728fd4c GS |
2026 | vma = find_vma_intersection(current->mm, hva, reg_end); |
2027 | if (!vma) | |
8eef9123 AB |
2028 | break; |
2029 | ||
d89585fb | 2030 | if (kvm_has_mte(kvm) && !kvm_vma_mte_allowed(vma)) { |
6e6a8ef0 QP |
2031 | ret = -EINVAL; |
2032 | break; | |
2033 | } | |
ea7fc1bb | 2034 | |
8eef9123 | 2035 | if (vma->vm_flags & VM_PFNMAP) { |
15a49a44 | 2036 | /* IO region dirty page logging not allowed */ |
537a17b3 | 2037 | if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
72f31048 | 2038 | ret = -EINVAL; |
8eef9123 | 2039 | break; |
fd6f17ba | 2040 | } |
8eef9123 | 2041 | } |
fd6f17ba | 2042 | hva = min(reg_end, vma->vm_end); |
8eef9123 AB |
2043 | } while (hva < reg_end); |
2044 | ||
89154dd5 | 2045 | mmap_read_unlock(current->mm); |
8eef9123 | 2046 | return ret; |
df6ce24f EA |
2047 | } |
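The do/while loop above has to visit every VMA intersecting the candidate region, including across holes between mappings. A userspace model of that walk, using a hard-coded array instead of the real mm/ find_vma_intersection():

/*
 * Illustrative model only: struct and lookup below are stand-ins, not
 * the kernel's VMA API.
 */
#include <stdio.h>

struct fake_vma { unsigned long start, end; };	/* [start, end) */

static const struct fake_vma vmas[] = {
	{ 0x1000, 0x4000 },
	{ 0x6000, 0x9000 },	/* note the hole 0x4000-0x6000 */
};

static const struct fake_vma *fake_find_vma_intersection(unsigned long lo,
							  unsigned long hi)
{
	for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
		if (vmas[i].start < hi && vmas[i].end > lo)
			return &vmas[i];
	return NULL;
}

int main(void)
{
	unsigned long hva = 0x2000, reg_end = 0x8000;	/* candidate memslot range */

	do {
		const struct fake_vma *vma = fake_find_vma_intersection(hva, reg_end);
		if (!vma)
			break;
		printf("checking vma [%#lx, %#lx)\n", vma->start, vma->end);
		/* ... here the real code rejects PFNMAP+dirty-log and MTE mismatches ... */
		hva = reg_end < vma->end ? reg_end : vma->end;	/* min(reg_end, vma->end) */
	} while (hva < reg_end);
	return 0;
}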
2048 | ||
e96c81ee | 2049 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) |
df6ce24f EA |
2050 | { |
2051 | } | |
2052 | ||
15248258 | 2053 | void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) |
df6ce24f EA |
2054 | { |
2055 | } | |
2056 | ||
2057 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | |
2058 | { | |
ce2b6022 | 2059 | kvm_uninit_stage2_mmu(kvm); |
df6ce24f EA |
2060 | } |
2061 | ||
2062 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |
2063 | struct kvm_memory_slot *slot) | |
2064 | { | |
8eef9123 AB |
2065 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
2066 | phys_addr_t size = slot->npages << PAGE_SHIFT; | |
2067 | ||
fcc5bf89 | 2068 | write_lock(&kvm->mmu_lock); |
a0e50aa3 | 2069 | unmap_stage2_range(&kvm->arch.mmu, gpa, size); |
fcc5bf89 | 2070 | write_unlock(&kvm->mmu_lock); |
df6ce24f | 2071 | } |
3c1e7165 MZ |
2072 | |
2073 | /* | |
2074 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | |
2075 | * | |
2076 | * Main problems: | |
2077 | * - S/W ops are local to a CPU (not broadcast) | |
2078 | * - We have line migration behind our back (speculation) | |
2079 | * - System caches don't support S/W at all (damn!) | |
2080 | * | |
2081 | * In the face of the above, the best we can do is to try and convert | |
2082 | * S/W ops to VA ops. Because the guest is not allowed to infer the | |
2083 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, | |
2084 | * which is a rather good thing for us. | |
2085 | * | |
2086 | * Also, it is only used when turning caches on/off ("The expected | |
2087 | * usage of the cache maintenance instructions that operate by set/way | |
2088 | * is associated with the cache maintenance instructions associated | |
2089 | * with the powerdown and powerup of caches, if this is required by | |
2090 | * the implementation."). | |
2091 | * | |
2092 | * We use the following policy: | |
2093 | * | |
2094 | * - If we trap a S/W operation, we enable VM trapping to detect | |
2095 | * caches being turned on/off, and do a full clean. | |
2096 | * | |
2097 | * - We flush the caches on both caches being turned on and off. | |
2098 | * | |
2099 | * - Once the caches are enabled, we stop trapping VM ops. | |
2100 | */ | |
2101 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) | |
2102 | { | |
3df59d8d | 2103 | unsigned long hcr = *vcpu_hcr(vcpu); |
3c1e7165 MZ |
2104 | |
2105 | /* | |
2106 | * If this is the first time we do a S/W operation | |
2107 | * (i.e. HCR_TVM not set) flush the whole memory, and set the | |
2108 | * VM trapping. | |
2109 | * | |
2110 | * Otherwise, rely on the VM trapping to wait for the MMU + | |
2111 | * Caches to be turned off. At that point, we'll be able to | |
2112 | * clean the caches again. | |
2113 | */ | |
2114 | if (!(hcr & HCR_TVM)) { | |
2115 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), | |
2116 | vcpu_has_cache_enabled(vcpu)); | |
2117 | stage2_flush_vm(vcpu->kvm); | |
3df59d8d | 2118 | *vcpu_hcr(vcpu) = hcr | HCR_TVM; |
3c1e7165 MZ |
2119 | } |
2120 | } | |
2121 | ||
2122 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) | |
2123 | { | |
2124 | bool now_enabled = vcpu_has_cache_enabled(vcpu); | |
2125 | ||
2126 | /* | |
2127 | * If switching the MMU+caches on, need to invalidate the caches. | |
2128 | * If switching it off, need to clean the caches. | |
2129 | * Clean + invalidate does the trick always. | |
2130 | */ | |
2131 | if (now_enabled != was_enabled) | |
2132 | stage2_flush_vm(vcpu->kvm); | |
2133 | ||
2134 | /* Caches are now on, stop trapping VM ops (until a S/W op) */ | |
2135 | if (now_enabled) | |
3df59d8d | 2136 | *vcpu_hcr(vcpu) &= ~HCR_TVM; |
3c1e7165 MZ |
2137 | |
2138 | trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); | |
2139 | } |