KVM: arm64: Update page shift if stage 2 block mapping not supported
arch/arm64/kvm/mmu.c
d94d71cb 1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
749cf76c 5 */
6
7#include <linux/mman.h>
8#include <linux/kvm_host.h>
9#include <linux/io.h>
ad361f09 10#include <linux/hugetlb.h>
196f878a 11#include <linux/sched/signal.h>
45e96ea6 12#include <trace/events/kvm.h>
342cd0ab 13#include <asm/pgalloc.h>
94f8e641 14#include <asm/cacheflush.h>
15#include <asm/kvm_arm.h>
16#include <asm/kvm_mmu.h>
0db5e022 17#include <asm/kvm_ras.h>
d5d8184d 18#include <asm/kvm_asm.h>
94f8e641 19#include <asm/kvm_emulate.h>
1e947bad 20#include <asm/virt.h>
21
22#include "trace.h"
342cd0ab 23
5a677ce0 24static pgd_t *boot_hyp_pgd;
2fb41059 25static pgd_t *hyp_pgd;
e4c5a685 26static pgd_t *merged_hyp_pgd;
27static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
28
29static unsigned long hyp_idmap_start;
30static unsigned long hyp_idmap_end;
31static phys_addr_t hyp_idmap_vector;
32
33static unsigned long io_map_base;
34
38f791a4 35#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
5d4e08c4 36
37#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
38#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
39
40static bool is_iomap(unsigned long flags)
41{
42 return flags & KVM_S2PTE_FLAG_IS_IOMAP;
43}
44
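/*
 * A memslot has dirty logging active once userspace attaches a dirty
 * bitmap to it and the slot is writable; read-only slots never need
 * write protection for logging.
 */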
45static bool memslot_is_logging(struct kvm_memory_slot *memslot)
46{
15a49a44 47 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
48}
49
50/**
51 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
52 * @kvm: pointer to kvm structure.
53 *
54 * Interface to HYP function to flush all VM TLB entries
55 */
56void kvm_flush_remote_tlbs(struct kvm *kvm)
57{
a0e50aa3 58 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
15a49a44 59}
ad361f09 60
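/*
 * Invalidate the stage-2 TLB entries for @ipa in the given stage-2
 * MMU. @level hints at the page-table level holding the mapping so
 * that HYP can scope the invalidation (see the S2_*_LEVEL callers).
 */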
61static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
62 int level)
d5d8184d 63{
efaa5b93 64 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
65}
66
67/*
68 * D-Cache management functions. They take the page table entries by
69 * value, as they are flushing the cache using the kernel mapping (or
70 * kmap on 32bit).
71 */
72static void kvm_flush_dcache_pte(pte_t pte)
73{
74 __kvm_flush_dcache_pte(pte);
75}
76
77static void kvm_flush_dcache_pmd(pmd_t pmd)
78{
79 __kvm_flush_dcache_pmd(pmd);
80}
81
82static void kvm_flush_dcache_pud(pud_t pud)
83{
84 __kvm_flush_dcache_pud(pud);
85}
86
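/*
 * Anything without a struct page backing it (!pfn_valid()) is assumed
 * to be a device mapping; such pages are exempt from the D-cache
 * maintenance in the unmap and flush paths below.
 */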
87static bool kvm_is_device_pfn(unsigned long pfn)
88{
89 return !pfn_valid(pfn);
90}
91
92/**
93 * stage2_dissolve_pmd() - clear and flush huge PMD entry
a0e50aa3 94 * @mmu: pointer to mmu structure to operate on
95 * @addr: IPA
96 * @pmd: pmd pointer for IPA
97 *
98 * Clears a PMD entry and flushes the 1st and 2nd stage TLB entries for @addr.
15a49a44 99 */
a0e50aa3 100static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t *pmd)
15a49a44 101{
bbb3b6b3 102 if (!pmd_thp_or_huge(*pmd))
103 return;
104
105 pmd_clear(pmd);
efaa5b93 106 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
107 put_page(virt_to_page(pmd));
108}
109
110/**
111 * stage2_dissolve_pud() - clear and flush huge PUD entry
a0e50aa3 112 * @mmu: pointer to mmu structure to operate on
113 * @addr: IPA
114 * @pud: pud pointer for IPA
115 *
116 * Clears a PUD entry and flushes the 1st and 2nd stage TLB entries for @addr.
b8e0ba7c 117 */
a0e50aa3 118static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t *pudp)
b8e0ba7c 119{
120 struct kvm *kvm = mmu->kvm;
121
122 if (!stage2_pud_huge(kvm, *pudp))
123 return;
124
125 stage2_pud_clear(kvm, pudp);
efaa5b93 126 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
127 put_page(virt_to_page(pudp));
128}
129
a0e50aa3 130static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr_t addr)
979acd5e 131{
a0e50aa3 132 struct kvm *kvm = mmu->kvm;
e9f63768 133 p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
e55cac5b 134 stage2_pgd_clear(kvm, pgd);
efaa5b93 135 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
e9f63768 136 stage2_p4d_free(kvm, p4d_table);
4f853a71 137 put_page(virt_to_page(pgd));
138}
139
a0e50aa3 140static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr_t addr)
e9f63768 141{
a0e50aa3 142 struct kvm *kvm = mmu->kvm;
143 pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
144 stage2_p4d_clear(kvm, p4d);
efaa5b93 145 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
146 stage2_pud_free(kvm, pud_table);
147 put_page(virt_to_page(p4d));
148}
149
a0e50aa3 150static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr_t addr)
342cd0ab 151{
a0e50aa3 152 struct kvm *kvm = mmu->kvm;
e55cac5b 153 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
a0e50aa3 154
155 VM_BUG_ON(stage2_pud_huge(kvm, *pud));
156 stage2_pud_clear(kvm, pud);
efaa5b93 157 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
e55cac5b 158 stage2_pmd_free(kvm, pmd_table);
159 put_page(virt_to_page(pud));
160}
342cd0ab 161
a0e50aa3 162static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr_t addr)
4f728276 163{
4f853a71 164 pte_t *pte_table = pte_offset_kernel(pmd, 0);
bbb3b6b3 165 VM_BUG_ON(pmd_thp_or_huge(*pmd));
4f853a71 166 pmd_clear(pmd);
efaa5b93 167 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
14b94d07 168 free_page((unsigned long)pte_table);
169 put_page(virt_to_page(pmd));
170}
171
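/*
 * The setters below pair each page-table update with a dsb(ishst) so
 * that the new entry is visible to the hardware table walker before
 * any subsequent TLB maintenance for that entry is issued.
 */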
172static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
173{
174 WRITE_ONCE(*ptep, new_pte);
175 dsb(ishst);
176}
177
178static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
179{
180 WRITE_ONCE(*pmdp, new_pmd);
181 dsb(ishst);
182}
183
184static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
185{
186 kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
187}
188
189static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
190{
191 WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
192 dsb(ishst);
193}
194
e9f63768 195static inline void kvm_p4d_populate(p4d_t *p4dp, pud_t *pudp)
0db9dd8a 196{
e9f63768 197 WRITE_ONCE(*p4dp, kvm_mk_p4d(pudp));
198 dsb(ishst);
199}
200
201static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp)
202{
203#ifndef __PAGETABLE_P4D_FOLDED
204 WRITE_ONCE(*pgdp, kvm_mk_pgd(p4dp));
205 dsb(ishst);
206#endif
207}
208
209/*
210 * Unmapping vs dcache management:
211 *
212 * If a guest maps certain memory pages as uncached, all writes will
213 * bypass the data cache and go directly to RAM. However, the CPUs
214 * can still speculate reads (not writes) and fill cache lines with
215 * data.
216 *
217 * Those cache lines will be *clean* cache lines though, so a
218 * clean+invalidate operation is equivalent to an invalidate
219 * operation, because no cache lines are marked dirty.
220 *
221 * Those clean cache lines could be filled prior to an uncached write
222 * by the guest, and the cache coherent IO subsystem would therefore
223 * end up writing old data to disk.
224 *
225 * This is why right after unmapping a page/section and invalidating
226 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
227 * the IO subsystem will never hit in the cache.
228 *
229 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
230 * we then fully enforce cacheability of RAM, no matter what the guest
231 * does.
363ef89f 232 */
a0e50aa3 233static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
4f853a71 234 phys_addr_t addr, phys_addr_t end)
4f728276 235{
236 phys_addr_t start_addr = addr;
237 pte_t *pte, *start_pte;
238
239 start_pte = pte = pte_offset_kernel(pmd, addr);
240 do {
241 if (!pte_none(*pte)) {
242 pte_t old_pte = *pte;
243
4f853a71 244 kvm_set_pte(pte, __pte(0));
efaa5b93 245 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
246
247 /* No need to invalidate the cache for device mappings */
0de58f85 248 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
249 kvm_flush_dcache_pte(old_pte);
250
251 put_page(virt_to_page(pte));
252 }
253 } while (pte++, addr += PAGE_SIZE, addr != end);
254
255 if (stage2_pte_table_empty(mmu->kvm, start_pte))
256 clear_stage2_pmd_entry(mmu, pmd, start_addr);
257}
258
a0e50aa3 259static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
4f853a71 260 phys_addr_t addr, phys_addr_t end)
000d3996 261{
a0e50aa3 262 struct kvm *kvm = mmu->kvm;
263 phys_addr_t next, start_addr = addr;
264 pmd_t *pmd, *start_pmd;
000d3996 265
e55cac5b 266 start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
4f853a71 267 do {
e55cac5b 268 next = stage2_pmd_addr_end(kvm, addr, end);
4f853a71 269 if (!pmd_none(*pmd)) {
bbb3b6b3 270 if (pmd_thp_or_huge(*pmd)) {
271 pmd_t old_pmd = *pmd;
272
4f853a71 273 pmd_clear(pmd);
efaa5b93 274 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
275
276 kvm_flush_dcache_pmd(old_pmd);
277
278 put_page(virt_to_page(pmd));
279 } else {
a0e50aa3 280 unmap_stage2_ptes(mmu, pmd, addr, next);
4f853a71 281 }
ad361f09 282 }
4f853a71 283 } while (pmd++, addr = next, addr != end);
ad361f09 284
e55cac5b 285 if (stage2_pmd_table_empty(kvm, start_pmd))
a0e50aa3 286 clear_stage2_pud_entry(mmu, pud, start_addr);
4f853a71 287}
000d3996 288
a0e50aa3 289static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
290 phys_addr_t addr, phys_addr_t end)
291{
a0e50aa3 292 struct kvm *kvm = mmu->kvm;
293 phys_addr_t next, start_addr = addr;
294 pud_t *pud, *start_pud;
4f728276 295
e9f63768 296 start_pud = pud = stage2_pud_offset(kvm, p4d, addr);
4f853a71 297 do {
298 next = stage2_pud_addr_end(kvm, addr, end);
299 if (!stage2_pud_none(kvm, *pud)) {
300 if (stage2_pud_huge(kvm, *pud)) {
301 pud_t old_pud = *pud;
302
e55cac5b 303 stage2_pud_clear(kvm, pud);
efaa5b93 304 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
363ef89f 305 kvm_flush_dcache_pud(old_pud);
306 put_page(virt_to_page(pud));
307 } else {
a0e50aa3 308 unmap_stage2_pmds(mmu, pud, addr, next);
309 }
310 }
4f853a71 311 } while (pud++, addr = next, addr != end);
4f728276 312
e55cac5b 313 if (stage2_pud_table_empty(kvm, start_pud))
a0e50aa3 314 clear_stage2_p4d_entry(mmu, p4d, start_addr);
315}
316
a0e50aa3 317static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
318 phys_addr_t addr, phys_addr_t end)
319{
a0e50aa3 320 struct kvm *kvm = mmu->kvm;
321 phys_addr_t next, start_addr = addr;
322 p4d_t *p4d, *start_p4d;
323
324 start_p4d = p4d = stage2_p4d_offset(kvm, pgd, addr);
325 do {
326 next = stage2_p4d_addr_end(kvm, addr, end);
327 if (!stage2_p4d_none(kvm, *p4d))
a0e50aa3 328 unmap_stage2_puds(mmu, p4d, addr, next);
329 } while (p4d++, addr = next, addr != end);
330
331 if (stage2_p4d_table_empty(kvm, start_p4d))
a0e50aa3 332 clear_stage2_pgd_entry(mmu, pgd, start_addr);
333}
334
335/**
336 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
337 * @mmu: The KVM stage-2 MMU pointer
338 * @start: The intermediate physical base address of the range to unmap
339 * @size: The size of the area to unmap
340 *
341 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
342 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
343 * destroying the VM), otherwise another faulting VCPU may come in and mess
344 * with things behind our backs.
345 */
a0e50aa3 346static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
4f853a71 347{
a0e50aa3 348 struct kvm *kvm = mmu->kvm;
349 pgd_t *pgd;
350 phys_addr_t addr = start, end = start + size;
351 phys_addr_t next;
352
8b3405e3 353 assert_spin_locked(&kvm->mmu_lock);
354 WARN_ON(size & ~PAGE_MASK);
355
a0e50aa3 356 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
4f853a71 357 do {
358 /*
359 * Make sure the page table is still active, as another thread
360 * could have possibly freed the page table, while we released
361 * the lock.
362 */
a0e50aa3 363 if (!READ_ONCE(mmu->pgd))
0c428a6a 364 break;
365 next = stage2_pgd_addr_end(kvm, addr, end);
366 if (!stage2_pgd_none(kvm, *pgd))
a0e50aa3 367 unmap_stage2_p4ds(mmu, pgd, addr, next);
368 /*
369 * If the range is too large, release the kvm->mmu_lock
370 * to prevent starvation and lockup detector warnings.
371 */
372 if (next != end)
373 cond_resched_lock(&kvm->mmu_lock);
4f853a71 374 } while (pgd++, addr = next, addr != end);
375}
376
a0e50aa3 377static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
378 phys_addr_t addr, phys_addr_t end)
379{
380 pte_t *pte;
381
382 pte = pte_offset_kernel(pmd, addr);
383 do {
0de58f85 384 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
363ef89f 385 kvm_flush_dcache_pte(*pte);
386 } while (pte++, addr += PAGE_SIZE, addr != end);
387}
388
a0e50aa3 389static void stage2_flush_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
390 phys_addr_t addr, phys_addr_t end)
391{
a0e50aa3 392 struct kvm *kvm = mmu->kvm;
393 pmd_t *pmd;
394 phys_addr_t next;
395
e55cac5b 396 pmd = stage2_pmd_offset(kvm, pud, addr);
9d218a1f 397 do {
e55cac5b 398 next = stage2_pmd_addr_end(kvm, addr, end);
9d218a1f 399 if (!pmd_none(*pmd)) {
bbb3b6b3 400 if (pmd_thp_or_huge(*pmd))
401 kvm_flush_dcache_pmd(*pmd);
402 else
a0e50aa3 403 stage2_flush_ptes(mmu, pmd, addr, next);
404 }
405 } while (pmd++, addr = next, addr != end);
406}
407
a0e50aa3 408static void stage2_flush_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
409 phys_addr_t addr, phys_addr_t end)
410{
a0e50aa3 411 struct kvm *kvm = mmu->kvm;
412 pud_t *pud;
413 phys_addr_t next;
414
e9f63768 415 pud = stage2_pud_offset(kvm, p4d, addr);
9d218a1f 416 do {
417 next = stage2_pud_addr_end(kvm, addr, end);
418 if (!stage2_pud_none(kvm, *pud)) {
419 if (stage2_pud_huge(kvm, *pud))
420 kvm_flush_dcache_pud(*pud);
421 else
a0e50aa3 422 stage2_flush_pmds(mmu, pud, addr, next);
423 }
424 } while (pud++, addr = next, addr != end);
425}
426
a0e50aa3 427static void stage2_flush_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
428 phys_addr_t addr, phys_addr_t end)
429{
a0e50aa3 430 struct kvm *kvm = mmu->kvm;
431 p4d_t *p4d;
432 phys_addr_t next;
433
434 p4d = stage2_p4d_offset(kvm, pgd, addr);
435 do {
436 next = stage2_p4d_addr_end(kvm, addr, end);
437 if (!stage2_p4d_none(kvm, *p4d))
a0e50aa3 438 stage2_flush_puds(mmu, p4d, addr, next);
439 } while (p4d++, addr = next, addr != end);
440}
441
442static void stage2_flush_memslot(struct kvm *kvm,
443 struct kvm_memory_slot *memslot)
444{
a0e50aa3 445 struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
446 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
447 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
448 phys_addr_t next;
449 pgd_t *pgd;
450
a0e50aa3 451 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
9d218a1f 452 do {
453 next = stage2_pgd_addr_end(kvm, addr, end);
454 if (!stage2_pgd_none(kvm, *pgd))
a0e50aa3 455 stage2_flush_p4ds(mmu, pgd, addr, next);
456
457 if (next != end)
458 cond_resched_lock(&kvm->mmu_lock);
459 } while (pgd++, addr = next, addr != end);
460}
461
462/**
463 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
464 * @kvm: The struct kvm pointer
465 *
466 * Go through the stage 2 page tables and invalidate any cache lines
467 * backing memory already mapped to the VM.
468 */
3c1e7165 469static void stage2_flush_vm(struct kvm *kvm)
470{
471 struct kvm_memslots *slots;
472 struct kvm_memory_slot *memslot;
473 int idx;
474
475 idx = srcu_read_lock(&kvm->srcu);
476 spin_lock(&kvm->mmu_lock);
477
478 slots = kvm_memslots(kvm);
479 kvm_for_each_memslot(memslot, slots)
480 stage2_flush_memslot(kvm, memslot);
481
482 spin_unlock(&kvm->mmu_lock);
483 srcu_read_unlock(&kvm->srcu, idx);
484}
485
486static void clear_hyp_pgd_entry(pgd_t *pgd)
487{
e9f63768 488 p4d_t *p4d_table __maybe_unused = p4d_offset(pgd, 0UL);
64f32497 489 pgd_clear(pgd);
e9f63768 490 p4d_free(NULL, p4d_table);
491 put_page(virt_to_page(pgd));
492}
493
494static void clear_hyp_p4d_entry(p4d_t *p4d)
495{
496 pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL);
497 VM_BUG_ON(p4d_huge(*p4d));
498 p4d_clear(p4d);
499 pud_free(NULL, pud_table);
500 put_page(virt_to_page(p4d));
501}
502
503static void clear_hyp_pud_entry(pud_t *pud)
504{
505 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
506 VM_BUG_ON(pud_huge(*pud));
507 pud_clear(pud);
508 pmd_free(NULL, pmd_table);
509 put_page(virt_to_page(pud));
510}
511
512static void clear_hyp_pmd_entry(pmd_t *pmd)
513{
514 pte_t *pte_table = pte_offset_kernel(pmd, 0);
515 VM_BUG_ON(pmd_thp_or_huge(*pmd));
516 pmd_clear(pmd);
517 pte_free_kernel(NULL, pte_table);
518 put_page(virt_to_page(pmd));
519}
520
521static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
522{
523 pte_t *pte, *start_pte;
524
525 start_pte = pte = pte_offset_kernel(pmd, addr);
526 do {
527 if (!pte_none(*pte)) {
528 kvm_set_pte(pte, __pte(0));
529 put_page(virt_to_page(pte));
530 }
531 } while (pte++, addr += PAGE_SIZE, addr != end);
532
533 if (hyp_pte_table_empty(start_pte))
534 clear_hyp_pmd_entry(pmd);
535}
536
537static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
538{
539 phys_addr_t next;
540 pmd_t *pmd, *start_pmd;
541
542 start_pmd = pmd = pmd_offset(pud, addr);
543 do {
544 next = pmd_addr_end(addr, end);
545 /* Hyp doesn't use huge pmds */
546 if (!pmd_none(*pmd))
547 unmap_hyp_ptes(pmd, addr, next);
548 } while (pmd++, addr = next, addr != end);
549
550 if (hyp_pmd_table_empty(start_pmd))
551 clear_hyp_pud_entry(pud);
552}
553
e9f63768 554static void unmap_hyp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end)
555{
556 phys_addr_t next;
557 pud_t *pud, *start_pud;
558
e9f63768 559 start_pud = pud = pud_offset(p4d, addr);
560 do {
561 next = pud_addr_end(addr, end);
562 /* Hyp doesn't use huge puds */
563 if (!pud_none(*pud))
564 unmap_hyp_pmds(pud, addr, next);
565 } while (pud++, addr = next, addr != end);
566
567 if (hyp_pud_table_empty(start_pud))
568 clear_hyp_p4d_entry(p4d);
569}
570
571static void unmap_hyp_p4ds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
572{
573 phys_addr_t next;
574 p4d_t *p4d, *start_p4d;
575
576 start_p4d = p4d = p4d_offset(pgd, addr);
577 do {
578 next = p4d_addr_end(addr, end);
579 /* Hyp doesn't use huge p4ds */
580 if (!p4d_none(*p4d))
581 unmap_hyp_puds(p4d, addr, next);
582 } while (p4d++, addr = next, addr != end);
583
584 if (hyp_p4d_table_empty(start_p4d))
585 clear_hyp_pgd_entry(pgd);
586}
587
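/*
 * Index into a (possibly reduced) hyp PGD: mask with the table's own
 * ptrs_per_pgd rather than the kernel's PTRS_PER_PGD. Illustrative
 * values only: with PGDIR_SHIFT == 30 and ptrs_per_pgd == 64, an addr
 * of 0x80000000 yields index (0x80000000 >> 30) & 63 == 2.
 */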
588static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
589{
590 return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
591}
592
593static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
594 phys_addr_t start, u64 size)
595{
596 pgd_t *pgd;
597 phys_addr_t addr = start, end = start + size;
598 phys_addr_t next;
599
600 /*
601 * We don't unmap anything from HYP, except at the hyp tear down.
602 * Hence, we don't have to invalidate the TLBs here.
603 */
3ddd4556 604 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
605 do {
606 next = pgd_addr_end(addr, end);
607 if (!pgd_none(*pgd))
e9f63768 608 unmap_hyp_p4ds(pgd, addr, next);
609 } while (pgd++, addr = next, addr != end);
610}
611
612static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
613{
614 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
615}
616
617static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
618{
619 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
620}
621
342cd0ab 622/**
4f728276 623 * free_hyp_pgds - free Hyp-mode page tables
342cd0ab 624 *
625 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
626 * therefore contains either mappings in the kernel memory area (above
e3f019b3 627 * PAGE_OFFSET), or device mappings in the idmap range.
5a677ce0 628 *
629 * boot_hyp_pgd should only map the idmap range, and is only used in
630 * the extended idmap case.
342cd0ab 631 */
4f728276 632void free_hyp_pgds(void)
342cd0ab 633{
634 pgd_t *id_pgd;
635
d157f4a5 636 mutex_lock(&kvm_hyp_pgd_mutex);
5a677ce0 637
638 id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
639
640 if (id_pgd) {
641 /* In case we never called hyp_mmu_init() */
642 if (!io_map_base)
643 io_map_base = hyp_idmap_start;
644 unmap_hyp_idmap_range(id_pgd, io_map_base,
645 hyp_idmap_start + PAGE_SIZE - io_map_base);
646 }
647
26781f9c 648 if (boot_hyp_pgd) {
649 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
650 boot_hyp_pgd = NULL;
651 }
652
4f728276 653 if (hyp_pgd) {
654 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
655 (uintptr_t)high_memory - PAGE_OFFSET);
d4cb9df5 656
38f791a4 657 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
d157f4a5 658 hyp_pgd = NULL;
4f728276 659 }
660 if (merged_hyp_pgd) {
661 clear_page(merged_hyp_pgd);
662 free_page((unsigned long)merged_hyp_pgd);
663 merged_hyp_pgd = NULL;
664 }
4f728276 665
666 mutex_unlock(&kvm_hyp_pgd_mutex);
667}
668
669static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
670 unsigned long end, unsigned long pfn,
671 pgprot_t prot)
672{
673 pte_t *pte;
674 unsigned long addr;
342cd0ab 675
676 addr = start;
677 do {
6060df84 678 pte = pte_offset_kernel(pmd, addr);
f8df7338 679 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
4f728276 680 get_page(virt_to_page(pte));
6060df84 681 pfn++;
3562c76d 682 } while (addr += PAGE_SIZE, addr != end);
683}
684
685static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
686 unsigned long end, unsigned long pfn,
687 pgprot_t prot)
688{
689 pmd_t *pmd;
690 pte_t *pte;
691 unsigned long addr, next;
692
693 addr = start;
694 do {
6060df84 695 pmd = pmd_offset(pud, addr);
696
697 BUG_ON(pmd_sect(*pmd));
698
699 if (pmd_none(*pmd)) {
4cf58924 700 pte = pte_alloc_one_kernel(NULL);
701 if (!pte) {
702 kvm_err("Cannot allocate Hyp pte\n");
703 return -ENOMEM;
704 }
0db9dd8a 705 kvm_pmd_populate(pmd, pte);
4f728276 706 get_page(virt_to_page(pmd));
707 }
708
709 next = pmd_addr_end(addr, end);
710
711 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
712 pfn += (next - addr) >> PAGE_SHIFT;
3562c76d 713 } while (addr = next, addr != end);
714
715 return 0;
716}
717
e9f63768 718static int create_hyp_pud_mappings(p4d_t *p4d, unsigned long start,
719 unsigned long end, unsigned long pfn,
720 pgprot_t prot)
721{
722 pud_t *pud;
723 pmd_t *pmd;
724 unsigned long addr, next;
725 int ret;
726
727 addr = start;
728 do {
e9f63768 729 pud = pud_offset(p4d, addr);
730
731 if (pud_none_or_clear_bad(pud)) {
732 pmd = pmd_alloc_one(NULL, addr);
733 if (!pmd) {
734 kvm_err("Cannot allocate Hyp pmd\n");
735 return -ENOMEM;
736 }
0db9dd8a 737 kvm_pud_populate(pud, pmd);
38f791a4 738 get_page(virt_to_page(pud));
739 }
740
741 next = pud_addr_end(addr, end);
742 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
743 if (ret)
744 return ret;
745 pfn += (next - addr) >> PAGE_SHIFT;
746 } while (addr = next, addr != end);
747
748 return 0;
749}
750
751static int create_hyp_p4d_mappings(pgd_t *pgd, unsigned long start,
752 unsigned long end, unsigned long pfn,
753 pgprot_t prot)
754{
755 p4d_t *p4d;
756 pud_t *pud;
757 unsigned long addr, next;
758 int ret;
759
760 addr = start;
761 do {
762 p4d = p4d_offset(pgd, addr);
763
764 if (p4d_none(*p4d)) {
765 pud = pud_alloc_one(NULL, addr);
766 if (!pud) {
767 kvm_err("Cannot allocate Hyp pud\n");
768 return -ENOMEM;
769 }
770 kvm_p4d_populate(p4d, pud);
771 get_page(virt_to_page(p4d));
772 }
773
774 next = p4d_addr_end(addr, end);
775 ret = create_hyp_pud_mappings(p4d, addr, next, pfn, prot);
776 if (ret)
777 return ret;
778 pfn += (next - addr) >> PAGE_SHIFT;
779 } while (addr = next, addr != end);
780
781 return 0;
782}
783
98732d1b 784static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
785 unsigned long start, unsigned long end,
786 unsigned long pfn, pgprot_t prot)
342cd0ab 787{
342cd0ab 788 pgd_t *pgd;
e9f63768 789 p4d_t *p4d;
790 unsigned long addr, next;
791 int err = 0;
792
342cd0ab 793 mutex_lock(&kvm_hyp_pgd_mutex);
794 addr = start & PAGE_MASK;
795 end = PAGE_ALIGN(end);
796 do {
3ddd4556 797 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
342cd0ab 798
38f791a4 799 if (pgd_none(*pgd)) {
800 p4d = p4d_alloc_one(NULL, addr);
801 if (!p4d) {
802 kvm_err("Cannot allocate Hyp p4d\n");
803 err = -ENOMEM;
804 goto out;
805 }
e9f63768 806 kvm_pgd_populate(pgd, p4d);
38f791a4 807 get_page(virt_to_page(pgd));
808 }
809
810 next = pgd_addr_end(addr, end);
e9f63768 811 err = create_hyp_p4d_mappings(pgd, addr, next, pfn, prot);
812 if (err)
813 goto out;
6060df84 814 pfn += (next - addr) >> PAGE_SHIFT;
3562c76d 815 } while (addr = next, addr != end);
816out:
817 mutex_unlock(&kvm_hyp_pgd_mutex);
818 return err;
819}
820
821static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
822{
823 if (!is_vmalloc_addr(kaddr)) {
824 BUG_ON(!virt_addr_valid(kaddr));
825 return __pa(kaddr);
826 } else {
827 return page_to_phys(vmalloc_to_page(kaddr)) +
828 offset_in_page(kaddr);
829 }
830}
831
342cd0ab 832/**
06e8c3b0 833 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
834 * @from: The virtual kernel start address of the range
835 * @to: The virtual kernel end address of the range (exclusive)
c8dddecd 836 * @prot: The protection to be applied to this range
342cd0ab 837 *
838 * The same virtual address as the kernel virtual address is also used
839 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
840 * physical pages.
342cd0ab 841 */
c8dddecd 842int create_hyp_mappings(void *from, void *to, pgprot_t prot)
342cd0ab 843{
844 phys_addr_t phys_addr;
845 unsigned long virt_addr;
846 unsigned long start = kern_hyp_va((unsigned long)from);
847 unsigned long end = kern_hyp_va((unsigned long)to);
6060df84 848
849 if (is_kernel_in_hyp_mode())
850 return 0;
851
852 start = start & PAGE_MASK;
853 end = PAGE_ALIGN(end);
6060df84 854
855 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
856 int err;
6060df84 857
40c2729b 858 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
859 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
860 virt_addr, virt_addr + PAGE_SIZE,
40c2729b 861 __phys_to_pfn(phys_addr),
c8dddecd 862 prot);
863 if (err)
864 return err;
865 }
866
867 return 0;
868}
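/*
 * Illustrative use (names hypothetical): a kernel object is typically
 * mirrored into HYP before EL2 code touches it, e.g.
 *
 *	err = create_hyp_mappings(obj, obj + 1, PAGE_HYP);
 *
 * after which the EL2 alias of the kernel VA is kern_hyp_va(obj).
 */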
869
870static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
871 unsigned long *haddr, pgprot_t prot)
342cd0ab 872{
873 pgd_t *pgd = hyp_pgd;
874 unsigned long base;
875 int ret = 0;
6060df84 876
e3f019b3 877 mutex_lock(&kvm_hyp_pgd_mutex);
6060df84 878
e3f019b3 879 /*
656012c7 880 * This assumes that we have enough space below the idmap
881 * page to allocate our VAs. If not, the check below will
882 * kick in. A potential alternative would be to detect that
883 * overflow and switch to an allocation above the idmap.
884 *
885 * The allocated size is always a multiple of PAGE_SIZE.
886 */
887 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
888 base = io_map_base - size;
1bb32a44 889
890 /*
891 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
892 * allocating the new area, as it would indicate we've
893 * overflowed the idmap/IO address range.
894 */
895 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
896 ret = -ENOMEM;
897 else
898 io_map_base = base;
899
900 mutex_unlock(&kvm_hyp_pgd_mutex);
901
902 if (ret)
903 goto out;
904
905 if (__kvm_cpu_uses_extended_idmap())
906 pgd = boot_hyp_pgd;
907
908 ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
909 base, base + size,
dc2e4633 910 __phys_to_pfn(phys_addr), prot);
911 if (ret)
912 goto out;
913
dc2e4633 914 *haddr = base + offset_in_page(phys_addr);
915
916out:
917 return ret;
918}
919
920/**
921 * create_hyp_io_mappings - Map IO into both kernel and HYP
922 * @phys_addr: The physical start address which gets mapped
923 * @size: Size of the region being mapped
924 * @kaddr: Kernel VA for this mapping
925 * @haddr: HYP VA for this mapping
926 */
927int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
928 void __iomem **kaddr,
929 void __iomem **haddr)
930{
931 unsigned long addr;
932 int ret;
933
934 *kaddr = ioremap(phys_addr, size);
935 if (!*kaddr)
936 return -ENOMEM;
937
938 if (is_kernel_in_hyp_mode()) {
939 *haddr = *kaddr;
940 return 0;
941 }
942
943 ret = __create_hyp_private_mapping(phys_addr, size,
944 &addr, PAGE_HYP_DEVICE);
945 if (ret) {
946 iounmap(*kaddr);
947 *kaddr = NULL;
948 *haddr = NULL;
949 return ret;
950 }
951
952 *haddr = (void __iomem *)addr;
953 return 0;
954}
955
956/**
957 * create_hyp_exec_mappings - Map an executable range into HYP
958 * @phys_addr: The physical start address which gets mapped
959 * @size: Size of the region being mapped
960 * @haddr: HYP VA for this mapping
961 */
962int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
963 void **haddr)
964{
965 unsigned long addr;
966 int ret;
967
968 BUG_ON(is_kernel_in_hyp_mode());
969
970 ret = __create_hyp_private_mapping(phys_addr, size,
971 &addr, PAGE_HYP_EXEC);
972 if (ret) {
973 *haddr = NULL;
974 return ret;
975 }
976
dc2e4633 977 *haddr = (void *)addr;
1bb32a44 978 return 0;
979}
980
d5d8184d 981/**
982 * kvm_init_stage2_mmu - Initialise a S2 MMU structure
983 * @kvm: The pointer to the KVM structure
984 * @mmu: The pointer to the s2 MMU structure
d5d8184d 985 *
8324c3d5 986 * Allocates only the stage-2 HW PGD level table(s) of size defined by
a0e50aa3 987 * stage2_pgd_size(mmu->kvm).
988 *
989 * Note we don't need locking here as this is only called when the VM is
990 * created, which can only be done once.
991 */
a0e50aa3 992int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
d5d8184d 993{
e329fb75 994 phys_addr_t pgd_phys;
d5d8184d 995 pgd_t *pgd;
a0e50aa3 996 int cpu;
d5d8184d 997
a0e50aa3 998 if (mmu->pgd != NULL) {
999 kvm_err("kvm_arch already initialized?\n");
1000 return -EINVAL;
1001 }
1002
9163ee23 1003 /* Allocate the HW PGD, making sure that each page gets its own refcount */
e55cac5b 1004 pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
9163ee23 1005 if (!pgd)
1006 return -ENOMEM;
1007
1008 pgd_phys = virt_to_phys(pgd);
1009 if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
1010 return -EINVAL;
1011
1012 mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
1013 if (!mmu->last_vcpu_ran) {
1014 free_pages_exact(pgd, stage2_pgd_size(kvm));
1015 return -ENOMEM;
1016 }
1017
1018 for_each_possible_cpu(cpu)
1019 *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
1020
1021 mmu->kvm = kvm;
1022 mmu->pgd = pgd;
1023 mmu->pgd_phys = pgd_phys;
1024 mmu->vmid.vmid_gen = 0;
1025
1026 return 0;
1027}
1028
1029static void stage2_unmap_memslot(struct kvm *kvm,
1030 struct kvm_memory_slot *memslot)
1031{
1032 hva_t hva = memslot->userspace_addr;
1033 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
1034 phys_addr_t size = PAGE_SIZE * memslot->npages;
1035 hva_t reg_end = hva + size;
1036
1037 /*
1038 * A memory region could potentially cover multiple VMAs, and any holes
1039 * between them, so iterate over all of them to find out if we should
1040 * unmap any of them.
1041 *
1042 * +--------------------------------------------+
1043 * +---------------+----------------+ +----------------+
1044 * | : VMA 1 | VMA 2 | | VMA 3 : |
1045 * +---------------+----------------+ +----------------+
1046 * | memory region |
1047 * +--------------------------------------------+
1048 */
1049 do {
1050 struct vm_area_struct *vma = find_vma(current->mm, hva);
1051 hva_t vm_start, vm_end;
1052
1053 if (!vma || vma->vm_start >= reg_end)
1054 break;
1055
1056 /*
1057 * Take the intersection of this VMA with the memory region
1058 */
1059 vm_start = max(hva, vma->vm_start);
1060 vm_end = min(reg_end, vma->vm_end);
1061
1062 if (!(vma->vm_flags & VM_PFNMAP)) {
1063 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
a0e50aa3 1064 unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
1065 }
1066 hva = vm_end;
1067 } while (hva < reg_end);
1068}
1069
1070/**
1071 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
1072 * @kvm: The struct kvm pointer
1073 *
1074 * Go through the memory regions and unmap any regular RAM
1075 * backing memory already mapped to the VM.
1076 */
1077void stage2_unmap_vm(struct kvm *kvm)
1078{
1079 struct kvm_memslots *slots;
1080 struct kvm_memory_slot *memslot;
1081 int idx;
1082
1083 idx = srcu_read_lock(&kvm->srcu);
89154dd5 1084 mmap_read_lock(current->mm);
1085 spin_lock(&kvm->mmu_lock);
1086
1087 slots = kvm_memslots(kvm);
1088 kvm_for_each_memslot(memslot, slots)
1089 stage2_unmap_memslot(kvm, memslot);
1090
1091 spin_unlock(&kvm->mmu_lock);
89154dd5 1092 mmap_read_unlock(current->mm);
1093 srcu_read_unlock(&kvm->srcu, idx);
1094}
1095
a0e50aa3 1096void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
d5d8184d 1097{
a0e50aa3 1098 struct kvm *kvm = mmu->kvm;
6c0d706b 1099 void *pgd = NULL;
d5d8184d 1100
8b3405e3 1101 spin_lock(&kvm->mmu_lock);
1102 if (mmu->pgd) {
1103 unmap_stage2_range(mmu, 0, kvm_phys_size(kvm));
1104 pgd = READ_ONCE(mmu->pgd);
1105 mmu->pgd = NULL;
6c0d706b 1106 }
1107 spin_unlock(&kvm->mmu_lock);
1108
9163ee23 1109 /* Free the HW pgd, one page at a time */
a0e50aa3 1110 if (pgd) {
e55cac5b 1111 free_pages_exact(pgd, stage2_pgd_size(kvm));
1112 free_percpu(mmu->last_vcpu_ran);
1113 }
1114}
1115
a0e50aa3 1116static p4d_t *stage2_get_p4d(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
ad361f09 1117 phys_addr_t addr)
d5d8184d 1118{
a0e50aa3 1119 struct kvm *kvm = mmu->kvm;
d5d8184d 1120 pgd_t *pgd;
e9f63768 1121 p4d_t *p4d;
d5d8184d 1122
a0e50aa3 1123 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
e55cac5b 1124 if (stage2_pgd_none(kvm, *pgd)) {
1125 if (!cache)
1126 return NULL;
c1a33aeb 1127 p4d = kvm_mmu_memory_cache_alloc(cache);
e9f63768 1128 stage2_pgd_populate(kvm, pgd, p4d);
1129 get_page(virt_to_page(pgd));
1130 }
1131
1132 return stage2_p4d_offset(kvm, pgd, addr);
1133}
1134
a0e50aa3 1135static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
1136 phys_addr_t addr)
1137{
a0e50aa3 1138 struct kvm *kvm = mmu->kvm;
1139 p4d_t *p4d;
1140 pud_t *pud;
1141
a0e50aa3 1142 p4d = stage2_get_p4d(mmu, cache, addr);
1143 if (stage2_p4d_none(kvm, *p4d)) {
1144 if (!cache)
1145 return NULL;
c1a33aeb 1146 pud = kvm_mmu_memory_cache_alloc(cache);
1147 stage2_p4d_populate(kvm, p4d, pud);
1148 get_page(virt_to_page(p4d));
1149 }
1150
1151 return stage2_pud_offset(kvm, p4d, addr);
1152}
1153
a0e50aa3 1154static pmd_t *stage2_get_pmd(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
1155 phys_addr_t addr)
1156{
a0e50aa3 1157 struct kvm *kvm = mmu->kvm;
1158 pud_t *pud;
1159 pmd_t *pmd;
1160
a0e50aa3 1161 pud = stage2_get_pud(mmu, cache, addr);
b8e0ba7c 1162 if (!pud || stage2_pud_huge(kvm, *pud))
1163 return NULL;
1164
e55cac5b 1165 if (stage2_pud_none(kvm, *pud)) {
d5d8184d 1166 if (!cache)
ad361f09 1167 return NULL;
c1a33aeb 1168 pmd = kvm_mmu_memory_cache_alloc(cache);
e55cac5b 1169 stage2_pud_populate(kvm, pud, pmd);
d5d8184d 1170 get_page(virt_to_page(pud));
1171 }
1172
e55cac5b 1173 return stage2_pmd_offset(kvm, pud, addr);
1174}
1175
1176static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
1177 struct kvm_mmu_memory_cache *cache,
1178 phys_addr_t addr, const pmd_t *new_pmd)
1179{
1180 pmd_t *pmd, old_pmd;
1181
3c3736cd 1182retry:
a0e50aa3 1183 pmd = stage2_get_pmd(mmu, cache, addr);
ad361f09 1184 VM_BUG_ON(!pmd);
d5d8184d 1185
ad361f09 1186 old_pmd = *pmd;
1187 /*
1188 * Multiple vcpus faulting on the same PMD entry can
1189 * lead to them sequentially updating the PMD with the
1190 * same value. Following the break-before-make
1191 * (pmd_clear() followed by tlb_flush()) process can
1192 * hinder forward progress due to refaults generated
1193 * on missing translations.
1194 *
1195 * Skip updating the page table if the entry is
1196 * unchanged.
1197 */
1198 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1199 return 0;
1200
d4b9e079 1201 if (pmd_present(old_pmd)) {
86658b81 1202 /*
1203 * If we already have PTE level mapping for this block,
1204 * we must unmap it to avoid inconsistent TLB state and
1205 * leaking the table page. We could end up in this situation
1206 * if the memory slot was marked for dirty logging and was
1207 * reverted, leaving PTE level mappings for the pages accessed
1208 * during the period. So, unmap the PTE level mapping for this
1209 * block and retry, as we could have released the upper level
1210 * table in the process.
86658b81 1211 *
1212 * Normal THP split/merge follows mmu_notifier callbacks and is
1213 * handled accordingly.
86658b81 1214 */
3c3736cd 1215 if (!pmd_thp_or_huge(old_pmd)) {
a0e50aa3 1216 unmap_stage2_range(mmu, addr & S2_PMD_MASK, S2_PMD_SIZE);
1217 goto retry;
1218 }
1219 /*
1220 * Mapping in huge pages should only happen through a
1221 * fault. If a page is merged into a transparent huge
1222 * page, the individual subpages of that huge page
1223 * should be unmapped through MMU notifiers before we
1224 * get here.
1225 *
1226 * Merging of CompoundPages is not supported; they
1227 * should be split first, unmapped, merged,
1228 * and mapped back in on-demand.
1229 */
3c3736cd 1230 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
d4b9e079 1231 pmd_clear(pmd);
efaa5b93 1232 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
d4b9e079 1233 } else {
ad361f09 1234 get_page(virt_to_page(pmd));
1235 }
1236
1237 kvm_set_pmd(pmd, *new_pmd);
1238 return 0;
1239}
1240
1241static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
1242 struct kvm_mmu_memory_cache *cache,
1243 phys_addr_t addr, const pud_t *new_pudp)
1244{
a0e50aa3 1245 struct kvm *kvm = mmu->kvm;
1246 pud_t *pudp, old_pud;
1247
3c3736cd 1248retry:
a0e50aa3 1249 pudp = stage2_get_pud(mmu, cache, addr);
1250 VM_BUG_ON(!pudp);
1251
1252 old_pud = *pudp;
1253
1254 /*
1255 * A large number of vcpus faulting on the same stage 2 entry
1256 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1257 * Skip updating the page tables if there is no change.
1258 */
1259 if (pud_val(old_pud) == pud_val(*new_pudp))
1260 return 0;
1261
1262 if (stage2_pud_present(kvm, old_pud)) {
1263 /*
1264 * If we already have table level mapping for this block, unmap
1265 * the range for this block and retry.
1266 */
1267 if (!stage2_pud_huge(kvm, old_pud)) {
a0e50aa3 1268 unmap_stage2_range(mmu, addr & S2_PUD_MASK, S2_PUD_SIZE);
1269 goto retry;
1270 }
1271
1272 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
b8e0ba7c 1273 stage2_pud_clear(kvm, pudp);
efaa5b93 1274 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
1275 } else {
1276 get_page(virt_to_page(pudp));
1277 }
1278
1279 kvm_set_pud(pudp, *new_pudp);
1280 return 0;
1281}
1282
1283/*
1284 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
1285 * true if a valid and present leaf-entry is found. A pointer to the
1286 * leaf-entry is returned in the appropriate level variable - pudpp,
1287 * pmdpp, ptepp.
1288 */
a0e50aa3 1289static bool stage2_get_leaf_entry(struct kvm_s2_mmu *mmu, phys_addr_t addr,
86d1c55e 1290 pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
7a3796d2 1291{
a0e50aa3 1292 struct kvm *kvm = mmu->kvm;
86d1c55e 1293 pud_t *pudp;
1294 pmd_t *pmdp;
1295 pte_t *ptep;
1296
1297 *pudpp = NULL;
1298 *pmdpp = NULL;
1299 *ptepp = NULL;
1300
a0e50aa3 1301 pudp = stage2_get_pud(mmu, NULL, addr);
1302 if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1303 return false;
1304
1305 if (stage2_pud_huge(kvm, *pudp)) {
1306 *pudpp = pudp;
1307 return true;
1308 }
1309
1310 pmdp = stage2_pmd_offset(kvm, pudp, addr);
1311 if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1312 return false;
1313
1314 if (pmd_thp_or_huge(*pmdp)) {
1315 *pmdpp = pmdp;
1316 return true;
1317 }
1318
1319 ptep = pte_offset_kernel(pmdp, addr);
1320 if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1321 return false;
1322
1323 *ptepp = ptep;
1324 return true;
1325}
1326
0378daef 1327static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr, unsigned long sz)
1328{
1329 pud_t *pudp;
1330 pmd_t *pmdp;
1331 pte_t *ptep;
1332 bool found;
1333
a0e50aa3 1334 found = stage2_get_leaf_entry(mmu, addr, &pudp, &pmdp, &ptep);
1335 if (!found)
1336 return false;
1337
1338 if (pudp)
b757b47a 1339 return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
86d1c55e 1340 else if (pmdp)
b757b47a 1341 return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
86d1c55e 1342 else
b757b47a 1343 return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
1344}
1345
1346static int stage2_set_pte(struct kvm_s2_mmu *mmu,
1347 struct kvm_mmu_memory_cache *cache,
1348 phys_addr_t addr, const pte_t *new_pte,
1349 unsigned long flags)
ad361f09 1350{
a0e50aa3 1351 struct kvm *kvm = mmu->kvm;
b8e0ba7c 1352 pud_t *pud;
1353 pmd_t *pmd;
1354 pte_t *pte, old_pte;
1355 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1356 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1357
1358 VM_BUG_ON(logging_active && !cache);
ad361f09 1359
38f791a4 1360 /* Create stage-2 page table mapping - Levels 0 and 1 */
a0e50aa3 1361 pud = stage2_get_pud(mmu, cache, addr);
1362 if (!pud) {
1363 /*
1364 * Ignore calls from kvm_set_spte_hva for unallocated
1365 * address ranges.
1366 */
1367 return 0;
1368 }
1369
1370 /*
1371 * While dirty page logging - dissolve huge PUD, then continue
1372 * on to allocate page.
1373 */
1374 if (logging_active)
a0e50aa3 1375 stage2_dissolve_pud(mmu, addr, pud);
1376
1377 if (stage2_pud_none(kvm, *pud)) {
1378 if (!cache)
1379 return 0; /* ignore calls from kvm_set_spte_hva */
c1a33aeb 1380 pmd = kvm_mmu_memory_cache_alloc(cache);
1381 stage2_pud_populate(kvm, pud, pmd);
1382 get_page(virt_to_page(pud));
1383 }
1384
1385 pmd = stage2_pmd_offset(kvm, pud, addr);
1386 if (!pmd) {
1387 /*
1388 * Ignore calls from kvm_set_spte_hva for unallocated
1389 * address ranges.
1390 */
1391 return 0;
1392 }
1393
1394 /*
1395 * While dirty page logging - dissolve huge PMD, then continue on to
1396 * allocate page.
1397 */
1398 if (logging_active)
a0e50aa3 1399 stage2_dissolve_pmd(mmu, addr, pmd);
15a49a44 1400
ad361f09 1401 /* Create stage-2 page mappings - Level 2 */
1402 if (pmd_none(*pmd)) {
1403 if (!cache)
1404 return 0; /* ignore calls from kvm_set_spte_hva */
c1a33aeb 1405 pte = kvm_mmu_memory_cache_alloc(cache);
0db9dd8a 1406 kvm_pmd_populate(pmd, pte);
d5d8184d 1407 get_page(virt_to_page(pmd));
1408 }
1409
1410 pte = pte_offset_kernel(pmd, addr);
1411
1412 if (iomap && pte_present(*pte))
1413 return -EFAULT;
1414
1415 /* Create 2nd stage page table mapping - Level 3 */
1416 old_pte = *pte;
d4b9e079 1417 if (pte_present(old_pte)) {
1418 /* Skip page table update if there is no change */
1419 if (pte_val(old_pte) == pte_val(*new_pte))
1420 return 0;
1421
d4b9e079 1422 kvm_set_pte(pte, __pte(0));
efaa5b93 1423 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
d4b9e079 1424 } else {
d5d8184d 1425 get_page(virt_to_page(pte));
d4b9e079 1426 }
d5d8184d 1427
d4b9e079 1428 kvm_set_pte(pte, *new_pte);
1429 return 0;
1430}
d5d8184d 1431
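/*
 * Stage-2 "young" (access flag) handling: if the architecture does not
 * provide __ptep_test_and_clear_young(), fall back to rewriting the
 * PTE with pte_mkold(). Both variants return whether the entry was
 * young before the clear.
 */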
1432#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1433static int stage2_ptep_test_and_clear_young(pte_t *pte)
1434{
1435 if (pte_young(*pte)) {
1436 *pte = pte_mkold(*pte);
1437 return 1;
1438 }
d5d8184d
CD
1439 return 0;
1440}
1441#else
1442static int stage2_ptep_test_and_clear_young(pte_t *pte)
1443{
1444 return __ptep_test_and_clear_young(pte);
1445}
1446#endif
1447
1448static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1449{
1450 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1451}
d5d8184d 1452
1453static int stage2_pudp_test_and_clear_young(pud_t *pud)
1454{
1455 return stage2_ptep_test_and_clear_young((pte_t *)pud);
1456}
1457
1458/**
1459 * kvm_phys_addr_ioremap - map a device range to guest IPA
1460 *
1461 * @kvm: The KVM pointer
1462 * @guest_ipa: The IPA at which to insert the mapping
1463 * @pa: The physical address of the device
1464 * @size: The size of the mapping
 * @writable: Whether the mapping is created writable
1465 */
1466int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
c40f2f8f 1467 phys_addr_t pa, unsigned long size, bool writable)
d5d8184d
CD
1468{
1469 phys_addr_t addr, end;
1470 int ret = 0;
1471 unsigned long pfn;
c1a33aeb 1472 struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
1473
1474 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1475 pfn = __phys_to_pfn(pa);
1476
1477 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
f8df7338 1478 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
d5d8184d 1479
c40f2f8f 1480 if (writable)
06485053 1481 pte = kvm_s2pte_mkwrite(pte);
c40f2f8f 1482
1483 ret = kvm_mmu_topup_memory_cache(&cache,
1484 kvm_mmu_cache_min_pages(kvm));
1485 if (ret)
1486 goto out;
1487 spin_lock(&kvm->mmu_lock);
1488 ret = stage2_set_pte(&kvm->arch.mmu, &cache, addr, &pte,
1489 KVM_S2PTE_FLAG_IS_IOMAP);
1490 spin_unlock(&kvm->mmu_lock);
1491 if (ret)
1492 goto out;
1493
1494 pfn++;
1495 }
1496
1497out:
c1a33aeb 1498 kvm_mmu_free_memory_cache(&cache);
1499 return ret;
1500}
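/*
 * Illustrative use (values hypothetical): a 4K MMIO window is mapped
 * into the guest with
 *
 *	ret = kvm_phys_addr_ioremap(kvm, gpa, pa, SZ_4K, true);
 *
 * Each PTE is installed with PAGE_S2_DEVICE attributes after topping
 * up the local page-table cache.
 */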
1501
1502/**
1503 * stage2_wp_ptes - write protect PMD range
1504 * @pmd: pointer to pmd entry
1505 * @addr: range start address
1506 * @end: range end address
1507 */
1508static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1509{
1510 pte_t *pte;
1511
1512 pte = pte_offset_kernel(pmd, addr);
1513 do {
1514 if (!pte_none(*pte)) {
1515 if (!kvm_s2pte_readonly(pte))
1516 kvm_set_s2pte_readonly(pte);
1517 }
1518 } while (pte++, addr += PAGE_SIZE, addr != end);
1519}
1520
1521/**
1522 * stage2_wp_pmds - write protect PUD range
1523 * @mmu: the KVM stage-2 MMU for the VM
1524 * @pud: pointer to pud entry
1525 * @addr: range start address
1526 * @end: range end address
1527 */
a0e50aa3 1528static void stage2_wp_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
e55cac5b 1529 phys_addr_t addr, phys_addr_t end)
c6473555 1530{
a0e50aa3 1531 struct kvm *kvm = mmu->kvm;
1532 pmd_t *pmd;
1533 phys_addr_t next;
1534
e55cac5b 1535 pmd = stage2_pmd_offset(kvm, pud, addr);
1536
1537 do {
e55cac5b 1538 next = stage2_pmd_addr_end(kvm, addr, end);
c6473555 1539 if (!pmd_none(*pmd)) {
bbb3b6b3 1540 if (pmd_thp_or_huge(*pmd)) {
1541 if (!kvm_s2pmd_readonly(pmd))
1542 kvm_set_s2pmd_readonly(pmd);
1543 } else {
1544 stage2_wp_ptes(pmd, addr, next);
1545 }
1546 }
1547 } while (pmd++, addr = next, addr != end);
1548}
1549
1550/**
e9f63768 1551 * stage2_wp_puds - write protect P4D range
a0e50aa3 1552 * @p4d: pointer to p4d entry
1553 * @addr: range start address
1554 * @end: range end address
1555 */
a0e50aa3 1556static void stage2_wp_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
e55cac5b 1557 phys_addr_t addr, phys_addr_t end)
c6473555 1558{
a0e50aa3 1559 struct kvm *kvm = mmu->kvm;
1560 pud_t *pud;
1561 phys_addr_t next;
1562
e9f63768 1563 pud = stage2_pud_offset(kvm, p4d, addr);
c6473555 1564 do {
1565 next = stage2_pud_addr_end(kvm, addr, end);
1566 if (!stage2_pud_none(kvm, *pud)) {
1567 if (stage2_pud_huge(kvm, *pud)) {
1568 if (!kvm_s2pud_readonly(pud))
1569 kvm_set_s2pud_readonly(pud);
1570 } else {
a0e50aa3 1571 stage2_wp_pmds(mmu, pud, addr, next);
4ea5af53 1572 }
1573 }
1574 } while (pud++, addr = next, addr != end);
1575}
1576
1577/**
1578 * stage2_wp_p4ds - write protect PGD range
1579 * @pgd: pointer to pgd entry
1580 * @addr: range start address
1581 * @end: range end address
1582 */
a0e50aa3 1583static void stage2_wp_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
1584 phys_addr_t addr, phys_addr_t end)
1585{
a0e50aa3 1586 struct kvm *kvm = mmu->kvm;
1587 p4d_t *p4d;
1588 phys_addr_t next;
1589
1590 p4d = stage2_p4d_offset(kvm, pgd, addr);
1591 do {
1592 next = stage2_p4d_addr_end(kvm, addr, end);
1593 if (!stage2_p4d_none(kvm, *p4d))
a0e50aa3 1594 stage2_wp_puds(mmu, p4d, addr, next);
1595 } while (p4d++, addr = next, addr != end);
1596}
1597
1598/**
1599 * stage2_wp_range() - write protect stage2 memory region range
1600 * @kvm: The KVM pointer
1601 * @addr: Start address of range
1602 * @end: End address of range
1603 */
a0e50aa3 1604static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
c6473555 1605{
a0e50aa3 1606 struct kvm *kvm = mmu->kvm;
1607 pgd_t *pgd;
1608 phys_addr_t next;
1609
a0e50aa3 1610 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
1611 do {
1612 /*
1613 * Release kvm_mmu_lock periodically if the memory region is
1614 * large. Otherwise, we may see kernel panics with
1615 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1616 * CONFIG_LOCKDEP. Additionally, holding the lock too long
1617 * will also starve other vCPUs. We have to also make sure
1618 * that the page tables are not freed while we released
1619 * the lock.
c6473555 1620 */
0c428a6a 1621 cond_resched_lock(&kvm->mmu_lock);
a0e50aa3 1622 if (!READ_ONCE(mmu->pgd))
0c428a6a 1623 break;
1624 next = stage2_pgd_addr_end(kvm, addr, end);
1625 if (stage2_pgd_present(kvm, *pgd))
a0e50aa3 1626 stage2_wp_p4ds(mmu, pgd, addr, next);
1627 } while (pgd++, addr = next, addr != end);
1628}
1629
1630/**
1631 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1632 * @kvm: The KVM pointer
1633 * @slot: The memory slot to write protect
1634 *
1635 * Called to start logging dirty pages after memory region
1636 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
4ea5af53 1637 * all present PUD, PMD and PTEs are write protected in the memory region.
c6473555
MS
1638 * Afterwards read of dirty page log can be called.
1639 *
1640 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1641 * serializing operations for VM memory regions.
1642 */
1643void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1644{
1645 struct kvm_memslots *slots = kvm_memslots(kvm);
1646 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1647 phys_addr_t start, end;
1648
1649 if (WARN_ON_ONCE(!memslot))
1650 return;
1651
1652 start = memslot->base_gfn << PAGE_SHIFT;
1653 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1654
1655 spin_lock(&kvm->mmu_lock);
a0e50aa3 1656 stage2_wp_range(&kvm->arch.mmu, start, end);
1657 spin_unlock(&kvm->mmu_lock);
1658 kvm_flush_remote_tlbs(kvm);
1659}
1660
1661/**
3b0f1d01 1662 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1663 * @kvm: The KVM pointer
1664 * @slot: The memory slot associated with mask
1665 * @gfn_offset: The gfn offset in memory slot
1666 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1667 * slot to be write protected
1668 *
1669 * Walks the bits set in mask and write protects the associated PTEs.
1670 * Caller must acquire kvm_mmu_lock.
1671 */
3b0f1d01 1672static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1673 struct kvm_memory_slot *slot,
1674 gfn_t gfn_offset, unsigned long mask)
1675{
1676 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1677 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1678 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1679
a0e50aa3 1680 stage2_wp_range(&kvm->arch.mmu, start, end);
53c810c3 1681}
c6473555 1682
1683/*
1684 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1685 * dirty pages.
1686 *
1687 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1688 * enable dirty logging for them.
1689 */
1690void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1691 struct kvm_memory_slot *slot,
1692 gfn_t gfn_offset, unsigned long mask)
1693{
1694 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1695}
1696
17ab9d57 1697static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
0d3e4d4f 1698{
17ab9d57 1699 __clean_dcache_guest_page(pfn, size);
1700}
1701
17ab9d57 1702static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
a15f6939 1703{
17ab9d57 1704 __invalidate_icache_guest_page(pfn, size);
1705}
1706
1559b758 1707static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
196f878a 1708{
795a8371 1709 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1710}
1711
1712static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1713 unsigned long hva,
1714 unsigned long map_size)
6794ad54 1715{
c2be79a0 1716 gpa_t gpa_start;
1717 hva_t uaddr_start, uaddr_end;
1718 size_t size;
1719
1720 /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
1721 if (map_size == PAGE_SIZE)
1722 return true;
1723
1724 size = memslot->npages * PAGE_SIZE;
1725
1726 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1727
1728 uaddr_start = memslot->userspace_addr;
1729 uaddr_end = uaddr_start + size;
1730
1731 /*
1732 * Pages belonging to memslots that don't have the same alignment
1733 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1734 * PMD/PUD entries, because we'll end up mapping the wrong pages.
1735 *
1736 * Consider a layout like the following:
1737 *
1738 * memslot->userspace_addr:
1739 * +-----+--------------------+--------------------+---+
a80868f3 1740 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
1741 * +-----+--------------------+--------------------+---+
1742 *
9f283614 1743 * memslot->base_gfn << PAGE_SHIFT:
6794ad54 1744 * +---+--------------------+--------------------+-----+
a80868f3 1745 * |abc|def Stage-2 block | Stage-2 block |tvxyz|
1746 * +---+--------------------+--------------------+-----+
1747 *
a80868f3 1748 * If we create those stage-2 blocks, we'll end up with this incorrect
1749 * mapping:
1750 * d -> f
1751 * e -> g
1752 * f -> h
1753 */
a80868f3 1754 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1755 return false;
1756
1757 /*
1758 * Next, let's make sure we're not trying to map anything not covered
1759 * by the memslot. This means we have to prohibit block size mappings
1760 * for the beginning and end of a non-block aligned and non-block sized
1761 * memory slot (illustrated by the head and tail parts of the
1762 * userspace view above containing pages 'abcde' and 'xyz',
1763 * respectively).
1764 *
1765 * Note that it doesn't matter if we do the check using the
1766 * userspace_addr or the base_gfn, as both are equally aligned (per
1767 * the check above) and equally sized.
1768 */
a80868f3
SP
1769 return (hva & ~(map_size - 1)) >= uaddr_start &&
1770 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
6794ad54
CD
1771}
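/*
 * Worked example (hypothetical addresses): for map_size = PMD_SIZE
 * (2MB), gpa_start = 0x40100000 and uaddr_start = 0x7f2a00100000
 * share the same offset (0x100000) within their 2MB blocks, so a
 * block mapping is permitted; had uaddr_start been 0x7f2a00080000,
 * the offsets (0x100000 vs 0x80000) would differ and the checks above
 * would force PAGE_SIZE mappings instead.
 */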
1772
0529c902
SP
1773/*
1774 * Check if the given hva is backed by a transparent huge page (THP) and
1775 * whether it can be mapped using block mapping in stage2. If so, adjust
1776 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
1777 * supported. This will need to be updated to support other THP sizes.
1778 *
1779 * Returns the size of the mapping.
1780 */
1781static unsigned long
1782transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
1783 unsigned long hva, kvm_pfn_t *pfnp,
1784 phys_addr_t *ipap)
1785{
1786 kvm_pfn_t pfn = *pfnp;
1787
1788 /*
1789 * Make sure the adjustment is done only for THP pages. Also make
1790 * sure that the HVA and IPA are sufficiently aligned and that the
1791 * block map is contained within the memslot.
1792 */
1793 if (kvm_is_transparent_hugepage(pfn) &&
1794 fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
1795 /*
1796 * The address we faulted on is backed by a transparent huge
1797 * page. However, because we map the compound huge page and
1798 * not the individual tail page, we need to transfer the
1799 * refcount to the head page. We have to be careful that the
1800 * THP doesn't start to split while we are adjusting the
1801 * refcounts.
1802 *
1803 * We are sure this doesn't happen, because mmu_notifier_retry
1804 * was successful and we are holding the mmu_lock, so if this
1805 * THP is trying to split, it will be blocked in the mmu
1806 * notifier before touching any of the pages, specifically
1807 * before being able to call __split_huge_page_refcount().
1808 *
1809 * We can therefore safely transfer the refcount from PG_tail
1810 * to PG_head and switch the pfn from a tail page to the head
1811 * page accordingly.
1812 */
1813 *ipap &= PMD_MASK;
1814 kvm_release_pfn_clean(pfn);
1815 pfn &= ~(PTRS_PER_PMD - 1);
1816 kvm_get_pfn(pfn);
1817 *pfnp = pfn;
1818
1819 return PMD_SIZE;
1820 }
1821
1822 /* Use page mapping if we cannot use block mapping. */
1823 return PAGE_SIZE;
1824}
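/*
 * Worked example (hypothetical pfn): with 4K pages and 2MB THPs,
 * PTRS_PER_PMD = 512. A fault on tail pfn 0x12345 is rounded down to
 * the head pfn 0x12200 (0x12345 & ~511UL), the IPA is aligned via
 * PMD_MASK in the same way, and the caller then installs one 2MB
 * block covering the whole compound page.
 */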
1825
94f8e641 1826static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
98047888 1827 struct kvm_memory_slot *memslot, unsigned long hva,
94f8e641
CD
1828 unsigned long fault_status)
1829{
94f8e641 1830 int ret;
6396b852
PA
1831 bool write_fault, writable, force_pte = false;
1832 bool exec_fault, needs_exec;
94f8e641 1833 unsigned long mmu_seq;
ad361f09 1834 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
ad361f09 1835 struct kvm *kvm = vcpu->kvm;
94f8e641 1836 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
ad361f09 1837 struct vm_area_struct *vma;
1559b758 1838 short vma_shift;
ba049e93 1839 kvm_pfn_t pfn;
b8865767 1840 pgprot_t mem_type = PAGE_S2;
15a49a44 1841 bool logging_active = memslot_is_logging(memslot);
3f58bf63 1842 unsigned long vma_pagesize, flags = 0;
a0e50aa3 1843 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
94f8e641 1844
a7d079ce 1845 write_fault = kvm_is_write_fault(vcpu);
d0e22b4a
MZ
1846 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1847 VM_BUG_ON(write_fault && exec_fault);
1848
1849 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
94f8e641
CD
1850 kvm_err("Unexpected L2 read permission error\n");
1851 return -EFAULT;
1852 }
1853
ad361f09 1854 /* Let's check if we will get back a huge page backed by hugetlbfs */
89154dd5 1855 mmap_read_lock(current->mm);
ad361f09 1856 vma = find_vma_intersection(current->mm, hva, hva + 1);
37b54408
AB
1857 if (unlikely(!vma)) {
1858 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
89154dd5 1859 mmap_read_unlock(current->mm);
37b54408
AB
1860 return -EFAULT;
1861 }
1862
1559b758
JM
1863 if (is_vm_hugetlb_page(vma))
1864 vma_shift = huge_page_shift(hstate_vma(vma));
1865 else
1866 vma_shift = PAGE_SHIFT;
1867
1868 vma_pagesize = 1ULL << vma_shift;
a80868f3 1869 if (logging_active ||
6d674e28 1870 (vma->vm_flags & VM_PFNMAP) ||
a80868f3
SP
1871 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1872 force_pte = true;
1873 vma_pagesize = PAGE_SIZE;
7b75cd51 1874 vma_shift = PAGE_SHIFT;
a80868f3
SP
1875 }
1876
b8e0ba7c 1877 /*
280cebfd
SP
 1878 * The stage2 page table has a minimum of 2 levels (for arm64, see
 1879 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
 1880 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
 1881 * As for PUD huge mappings, we must make sure that we have at least
 1882 * 3 levels, i.e., that the PMD is not folded.
b8e0ba7c 1883 */
a80868f3
SP
1884 if (vma_pagesize == PMD_SIZE ||
1885 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
b8e0ba7c 1886 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
89154dd5 1887 mmap_read_unlock(current->mm);
ad361f09 1888
94f8e641 1889 /* We need minimum second+third level pages */
c1a33aeb 1890 ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
94f8e641
CD
1891 if (ret)
1892 return ret;
1893
1894 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1895 /*
1896 * Ensure the read of mmu_notifier_seq happens before we call
1897 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
 1898 * the page we just got a reference to getting unmapped before we have a
 1899 * chance to grab the mmu_lock, which ensures that if the page gets
1900 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1901 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1902 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1903 */
1904 smp_rmb();
1905
ad361f09 1906 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
196f878a 1907 if (pfn == KVM_PFN_ERR_HWPOISON) {
1559b758 1908 kvm_send_hwpoison_signal(hva, vma_shift);
196f878a
JM
1909 return 0;
1910 }
9ac71595 1911 if (is_error_noslot_pfn(pfn))
94f8e641
CD
1912 return -EFAULT;
1913
15a49a44 1914 if (kvm_is_device_pfn(pfn)) {
b8865767 1915 mem_type = PAGE_S2_DEVICE;
15a49a44
MS
1916 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1917 } else if (logging_active) {
1918 /*
1919 * Faults on pages in a memslot with logging enabled
1920 * should not be mapped with huge pages (it introduces churn
1921 * and performance degradation), so force a pte mapping.
1922 */
15a49a44
MS
1923 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1924
1925 /*
1926 * Only actually map the page as writable if this was a write
1927 * fault.
1928 */
1929 if (!write_fault)
1930 writable = false;
1931 }
b8865767 1932
6d674e28
MZ
1933 if (exec_fault && is_iomap(flags))
1934 return -ENOEXEC;
1935
ad361f09
CD
1936 spin_lock(&kvm->mmu_lock);
1937 if (mmu_notifier_retry(kvm, mmu_seq))
94f8e641 1938 goto out_unlock;
15a49a44 1939
0529c902
SP
1940 /*
1941 * If we are not forced to use page mapping, check if we are
1942 * backed by a THP and thus use block mapping if possible.
1943 */
1944 if (vma_pagesize == PAGE_SIZE && !force_pte)
1945 vma_pagesize = transparent_hugepage_adjust(memslot, hva,
1946 &pfn, &fault_ipa);
3f58bf63
PA
1947 if (writable)
1948 kvm_set_pfn_dirty(pfn);
ad361f09 1949
6d674e28 1950 if (fault_status != FSC_PERM && !is_iomap(flags))
3f58bf63
PA
1951 clean_dcache_guest_page(pfn, vma_pagesize);
1952
1953 if (exec_fault)
1954 invalidate_icache_guest_page(pfn, vma_pagesize);
1955
6396b852
PA
1956 /*
1957 * If we took an execution fault we have made the
1958 * icache/dcache coherent above and should now let the s2
1959 * mapping be executable.
1960 *
1961 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
1962 * execute permissions, and we preserve whatever we have.
1963 */
1964 needs_exec = exec_fault ||
b757b47a 1965 (fault_status == FSC_PERM &&
0378daef 1966 stage2_is_exec(mmu, fault_ipa, vma_pagesize));
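	/*
	 * The three cases above, illustrated: an instruction-fetch fault
	 * sets exec_fault, so the new mapping becomes executable; a write
	 * permission fault on an already-executable entry keeps exec via
	 * stage2_is_exec(); a plain write fault on a fresh page stays
	 * non-executable until the guest actually fetches from it.
	 */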
6396b852 1967
3fb884ff
MZ
1968 /*
1969 * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
1970 * all we have is a 2-level page table. Trying to map a PUD in
1971 * this case would be fatally wrong.
1972 */
1973 if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
b8e0ba7c
PA
1974 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1975
1976 new_pud = kvm_pud_mkhuge(new_pud);
1977 if (writable)
1978 new_pud = kvm_s2pud_mkwrite(new_pud);
1979
1980 if (needs_exec)
1981 new_pud = kvm_s2pud_mkexec(new_pud);
1982
a0e50aa3 1983 ret = stage2_set_pud_huge(mmu, memcache, fault_ipa, &new_pud);
b8e0ba7c 1984 } else if (vma_pagesize == PMD_SIZE) {
f8df7338
PA
1985 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1986
1987 new_pmd = kvm_pmd_mkhuge(new_pmd);
1988
3f58bf63 1989 if (writable)
06485053 1990 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
d0e22b4a 1991
6396b852 1992 if (needs_exec)
d0e22b4a 1993 new_pmd = kvm_s2pmd_mkexec(new_pmd);
a15f6939 1994
a0e50aa3 1995 ret = stage2_set_pmd_huge(mmu, memcache, fault_ipa, &new_pmd);
ad361f09 1996 } else {
f8df7338 1997 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
15a49a44 1998
ad361f09 1999 if (writable) {
06485053 2000 new_pte = kvm_s2pte_mkwrite(new_pte);
15a49a44 2001 mark_page_dirty(kvm, gfn);
ad361f09 2002 }
a9c0e12e 2003
6396b852 2004 if (needs_exec)
d0e22b4a 2005 new_pte = kvm_s2pte_mkexec(new_pte);
a15f6939 2006
a0e50aa3 2007 ret = stage2_set_pte(mmu, memcache, fault_ipa, &new_pte, flags);
94f8e641 2008 }
ad361f09 2009
94f8e641 2010out_unlock:
ad361f09 2011 spin_unlock(&kvm->mmu_lock);
35307b9a 2012 kvm_set_pfn_accessed(pfn);
94f8e641 2013 kvm_release_pfn_clean(pfn);
ad361f09 2014 return ret;
94f8e641
CD
2015}
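/*
 * End-to-end sketch (hypothetical fault): a guest store to IPA
 * 0x80200000 backed by an anonymous THP takes a translation fault; the
 * VMA walk above starts with vma_pagesize = PAGE_SIZE,
 * transparent_hugepage_adjust() upgrades it to PMD_SIZE, and a single
 * writable 2MB stage-2 block is installed under the mmu_lock.
 */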
2016
aeda9130
MZ
2017/*
2018 * Resolve the access fault by making the page young again.
2019 * Note that because the faulting entry is guaranteed not to be
2020 * cached in the TLB, we don't need to invalidate anything.
06485053
CM
2021 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
2022 * so there is no need for atomic (pte|pmd)_mkyoung operations.
aeda9130
MZ
2023 */
2024static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
2025{
eb3f0624 2026 pud_t *pud;
aeda9130
MZ
2027 pmd_t *pmd;
2028 pte_t *pte;
ba049e93 2029 kvm_pfn_t pfn;
aeda9130
MZ
2030 bool pfn_valid = false;
2031
2032 trace_kvm_access_fault(fault_ipa);
2033
2034 spin_lock(&vcpu->kvm->mmu_lock);
2035
a0e50aa3 2036 if (!stage2_get_leaf_entry(vcpu->arch.hw_mmu, fault_ipa, &pud, &pmd, &pte))
aeda9130
MZ
2037 goto out;
2038
eb3f0624
PA
2039 if (pud) { /* HugeTLB */
2040 *pud = kvm_s2pud_mkyoung(*pud);
2041 pfn = kvm_pud_pfn(*pud);
2042 pfn_valid = true;
2043 } else if (pmd) { /* THP, HugeTLB */
aeda9130
MZ
2044 *pmd = pmd_mkyoung(*pmd);
2045 pfn = pmd_pfn(*pmd);
2046 pfn_valid = true;
eb3f0624
PA
2047 } else {
2048 *pte = pte_mkyoung(*pte); /* Just a page... */
2049 pfn = pte_pfn(*pte);
2050 pfn_valid = true;
aeda9130
MZ
2051 }
2052
aeda9130
MZ
2053out:
2054 spin_unlock(&vcpu->kvm->mmu_lock);
2055 if (pfn_valid)
2056 kvm_set_pfn_accessed(pfn);
2057}
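/*
 * Sketch (hypothetical fault): an access-flag fault on an IPA covered
 * by a 2MB THP finds a leaf PMD above; pmd_mkyoung() sets the AF in
 * place, no TLB invalidation is needed (the faulting entry cannot have
 * been cached), and the pfn is marked accessed for page aging.
 */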
2058
94f8e641
CD
2059/**
2060 * kvm_handle_guest_abort - handles all 2nd stage aborts
2061 * @vcpu: the VCPU pointer
94f8e641
CD
2062 *
2063 * Any abort that gets to the host is almost guaranteed to be caused by a
 2064 * missing second stage translation table entry: either the guest simply
 2065 * needs more memory and we must allocate an appropriate page, or the
 2066 * guest tried to access I/O memory, which is emulated by user space.
 2067 * The distinction is based on the IPA causing the fault and whether this
2068 * memory region has been registered as standard RAM by user space.
2069 */
74cc7e0c 2070int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
342cd0ab 2071{
94f8e641
CD
2072 unsigned long fault_status;
2073 phys_addr_t fault_ipa;
2074 struct kvm_memory_slot *memslot;
98047888
CD
2075 unsigned long hva;
2076 bool is_iabt, write_fault, writable;
94f8e641
CD
2077 gfn_t gfn;
2078 int ret, idx;
2079
621f48e4
TB
2080 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
2081
2082 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
bb428921 2083 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
621f48e4 2084
bb428921 2085 /* Synchronous External Abort? */
c9a636f2 2086 if (kvm_vcpu_abt_issea(vcpu)) {
bb428921
JM
2087 /*
2088 * For RAS the host kernel may handle this abort.
2089 * There is no need to pass the error into the guest.
2090 */
84b951a8 2091 if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
bb428921 2092 kvm_inject_vabt(vcpu);
84b951a8
WD
2093
2094 return 1;
4055710b
MZ
2095 }
2096
3a949f4c 2097 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
7393b599 2098 kvm_vcpu_get_hfar(vcpu), fault_ipa);
94f8e641
CD
2099
 2100 /* Check whether the stage-2 fault is a translation, permission or access fault */
35307b9a
MZ
2101 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
2102 fault_status != FSC_ACCESS) {
0496daa5
CD
2103 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
2104 kvm_vcpu_trap_get_class(vcpu),
2105 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
3a949f4c 2106 (unsigned long)kvm_vcpu_get_esr(vcpu));
94f8e641
CD
2107 return -EFAULT;
2108 }
2109
2110 idx = srcu_read_lock(&vcpu->kvm->srcu);
2111
2112 gfn = fault_ipa >> PAGE_SHIFT;
98047888
CD
2113 memslot = gfn_to_memslot(vcpu->kvm, gfn);
2114 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
a7d079ce 2115 write_fault = kvm_is_write_fault(vcpu);
98047888 2116 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
022c8328
WD
2117 /*
2118 * The guest has put either its instructions or its page-tables
2119 * somewhere it shouldn't have. Userspace won't be able to do
2120 * anything about this (there's no syndrome for a start), so
2121 * re-inject the abort back into the guest.
2122 */
94f8e641 2123 if (is_iabt) {
6d674e28
MZ
2124 ret = -ENOEXEC;
2125 goto out;
94f8e641
CD
2126 }
2127
022c8328
WD
2128 if (kvm_vcpu_dabt_iss1tw(vcpu)) {
2129 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
2130 ret = 1;
2131 goto out_unlock;
2132 }
2133
57c841f1
MZ
2134 /*
2135 * Check for a cache maintenance operation. Since we
 2136 * ended up here, we know it is outside of any memory
2137 * slot. But we can't find out if that is for a device,
2138 * or if the guest is just being stupid. The only thing
2139 * we know for sure is that this range cannot be cached.
2140 *
2141 * So let's assume that the guest is just being
2142 * cautious, and skip the instruction.
2143 */
54dc0d24 2144 if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
57c841f1
MZ
2145 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
2146 ret = 1;
2147 goto out_unlock;
2148 }
2149
cfe3950c
MZ
2150 /*
2151 * The IPA is reported as [MAX:12], so we need to
2152 * complement it with the bottom 12 bits from the
2153 * faulting VA. This is always 12 bits, irrespective
2154 * of the page size.
2155 */
2156 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
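		/*
		 * e.g. (hypothetical values): a page-aligned fault_ipa of
		 * 0x8001a000 combined with HFAR low bits 0xabc yields the
		 * byte-accurate IPA 0x8001aabc handed to io_mem_abort()
		 * below.
		 */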
74cc7e0c 2157 ret = io_mem_abort(vcpu, fault_ipa);
94f8e641
CD
2158 goto out_unlock;
2159 }
2160
c3058d5d 2161 /* Userspace should not be able to register out-of-bounds IPAs */
e55cac5b 2162 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
c3058d5d 2163
aeda9130
MZ
2164 if (fault_status == FSC_ACCESS) {
2165 handle_access_fault(vcpu, fault_ipa);
2166 ret = 1;
2167 goto out_unlock;
2168 }
2169
98047888 2170 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
94f8e641
CD
2171 if (ret == 0)
2172 ret = 1;
6d674e28
MZ
2173out:
2174 if (ret == -ENOEXEC) {
2175 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
2176 ret = 1;
2177 }
94f8e641
CD
2178out_unlock:
2179 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2180 return ret;
342cd0ab
CD
2181}
2182
1d2ebacc
MZ
2183static int handle_hva_to_gpa(struct kvm *kvm,
2184 unsigned long start,
2185 unsigned long end,
2186 int (*handler)(struct kvm *kvm,
056aad67
SP
2187 gpa_t gpa, u64 size,
2188 void *data),
1d2ebacc 2189 void *data)
d5d8184d
CD
2190{
2191 struct kvm_memslots *slots;
2192 struct kvm_memory_slot *memslot;
1d2ebacc 2193 int ret = 0;
d5d8184d
CD
2194
2195 slots = kvm_memslots(kvm);
2196
2197 /* we only care about the pages that the guest sees */
2198 kvm_for_each_memslot(memslot, slots) {
2199 unsigned long hva_start, hva_end;
056aad67 2200 gfn_t gpa;
d5d8184d
CD
2201
2202 hva_start = max(start, memslot->userspace_addr);
2203 hva_end = min(end, memslot->userspace_addr +
2204 (memslot->npages << PAGE_SHIFT));
2205 if (hva_start >= hva_end)
2206 continue;
2207
056aad67
SP
2208 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2209 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
d5d8184d 2210 }
1d2ebacc
MZ
2211
2212 return ret;
d5d8184d
CD
2213}
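/*
 * Worked example (hypothetical memslot): userspace_addr
 * 0x7f0000000000, base_gfn 0x40000, npages 0x200. A notifier range of
 * [0x7f0000003000, 0x7f0000005000) intersects the slot, so the handler
 * above is invoked once with gpa 0x40003000 and size 0x2000.
 */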
2214
056aad67 2215static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
d5d8184d 2216{
a0e50aa3 2217 unmap_stage2_range(&kvm->arch.mmu, gpa, size);
1d2ebacc 2218 return 0;
d5d8184d
CD
2219}
2220
d5d8184d
CD
2221int kvm_unmap_hva_range(struct kvm *kvm,
2222 unsigned long start, unsigned long end)
2223{
a0e50aa3 2224 if (!kvm->arch.mmu.pgd)
d5d8184d
CD
2225 return 0;
2226
2227 trace_kvm_unmap_hva_range(start, end);
2228 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2229 return 0;
2230}
2231
056aad67 2232static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
d5d8184d
CD
2233{
2234 pte_t *pte = (pte_t *)data;
2235
056aad67 2236 WARN_ON(size != PAGE_SIZE);
15a49a44
MS
2237 /*
 2238 * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
2239 * flag clear because MMU notifiers will have unmapped a huge PMD before
2240 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
2241 * therefore stage2_set_pte() never needs to clear out a huge PMD
2242 * through this calling path.
2243 */
a0e50aa3 2244 stage2_set_pte(&kvm->arch.mmu, NULL, gpa, pte, 0);
1d2ebacc 2245 return 0;
d5d8184d
CD
2246}
2247
2248
748c0e31 2249int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
d5d8184d
CD
2250{
2251 unsigned long end = hva + PAGE_SIZE;
694556d5 2252 kvm_pfn_t pfn = pte_pfn(pte);
d5d8184d
CD
2253 pte_t stage2_pte;
2254
a0e50aa3 2255 if (!kvm->arch.mmu.pgd)
748c0e31 2256 return 0;
d5d8184d
CD
2257
2258 trace_kvm_set_spte_hva(hva);
694556d5
MZ
2259
2260 /*
2261 * We've moved a page around, probably through CoW, so let's treat it
2262 * just like a translation fault and clean the cache to the PoC.
2263 */
2264 clean_dcache_guest_page(pfn, PAGE_SIZE);
f8df7338 2265 stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
d5d8184d 2266 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
748c0e31
LT
2267
2268 return 0;
d5d8184d
CD
2269}
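/*
 * Typical trigger (illustrative): a fork() followed by a write in the
 * VMM CoW-copies a page the guest has mapped; the ->change_pte()
 * notifier lands here, the replacement page is cleaned to PoC, and the
 * stage-2 PTE is repointed at the copy.
 */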
2270
056aad67 2271static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
35307b9a 2272{
35a63966 2273 pud_t *pud;
35307b9a
MZ
2274 pmd_t *pmd;
2275 pte_t *pte;
2276
35a63966 2277 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
a0e50aa3 2278 if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
35307b9a
MZ
2279 return 0;
2280
35a63966
PA
2281 if (pud)
2282 return stage2_pudp_test_and_clear_young(pud);
2283 else if (pmd)
06485053 2284 return stage2_pmdp_test_and_clear_young(pmd);
35a63966
PA
2285 else
2286 return stage2_ptep_test_and_clear_young(pte);
35307b9a
MZ
2287}
2288
056aad67 2289static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
35307b9a 2290{
35a63966 2291 pud_t *pud;
35307b9a
MZ
2292 pmd_t *pmd;
2293 pte_t *pte;
2294
35a63966 2295 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
a0e50aa3 2296 if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
35307b9a
MZ
2297 return 0;
2298
35a63966
PA
2299 if (pud)
2300 return kvm_s2pud_young(*pud);
2301 else if (pmd)
35307b9a 2302 return pmd_young(*pmd);
35a63966 2303 else
35307b9a 2304 return pte_young(*pte);
35307b9a
MZ
2305}
2306
2307int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2308{
a0e50aa3 2309 if (!kvm->arch.mmu.pgd)
7e5a6722 2310 return 0;
35307b9a
MZ
2311 trace_kvm_age_hva(start, end);
2312 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
2313}
2314
2315int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
2316{
a0e50aa3 2317 if (!kvm->arch.mmu.pgd)
7e5a6722 2318 return 0;
35307b9a 2319 trace_kvm_test_age_hva(hva);
cf2d23e0
GS
2320 return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
2321 kvm_test_age_hva_handler, NULL);
35307b9a
MZ
2322}
2323
d5d8184d
CD
2324void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
2325{
c1a33aeb 2326 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
d5d8184d
CD
2327}
2328
342cd0ab
CD
2329phys_addr_t kvm_mmu_get_httbr(void)
2330{
e4c5a685
AB
2331 if (__kvm_cpu_uses_extended_idmap())
2332 return virt_to_phys(merged_hyp_pgd);
2333 else
2334 return virt_to_phys(hyp_pgd);
342cd0ab
CD
2335}
2336
5a677ce0
MZ
2337phys_addr_t kvm_get_idmap_vector(void)
2338{
2339 return hyp_idmap_vector;
2340}
2341
0535a3e2
MZ
2342static int kvm_map_idmap_text(pgd_t *pgd)
2343{
2344 int err;
2345
2346 /* Create the idmap in the boot page tables */
98732d1b 2347 err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
0535a3e2
MZ
2348 hyp_idmap_start, hyp_idmap_end,
2349 __phys_to_pfn(hyp_idmap_start),
2350 PAGE_HYP_EXEC);
2351 if (err)
2352 kvm_err("Failed to idmap %lx-%lx\n",
2353 hyp_idmap_start, hyp_idmap_end);
2354
2355 return err;
2356}
2357
342cd0ab
CD
2358int kvm_mmu_init(void)
2359{
2fb41059
MZ
2360 int err;
2361
0a78791c 2362 hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
46fef158 2363 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
0a78791c 2364 hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
46fef158 2365 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
0a78791c 2366 hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);
5a677ce0 2367
06f75a1f
AB
2368 /*
2369 * We rely on the linker script to ensure at build time that the HYP
2370 * init code does not cross a page boundary.
2371 */
2372 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
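	/*
	 * Illustrative numbers: with __hyp_idmap_text_start at physical
	 * 0x40a13c40 and __hyp_idmap_text_end at 0x40a13e80, the
	 * page-aligned window becomes [0x40a13000, 0x40a14000) and the
	 * BUG_ON() passes, since start and end - 1 share a page frame.
	 */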
5a677ce0 2373
b4ef0499
MZ
2374 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
2375 kvm_debug("HYP VA range: %lx:%lx\n",
2376 kern_hyp_va(PAGE_OFFSET),
2377 kern_hyp_va((unsigned long)high_memory - 1));
eac378a9 2378
6c41a413 2379 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
ed57cac8 2380 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
d2896d4b 2381 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
eac378a9
MZ
2382 /*
2383 * The idmap page is intersecting with the VA space,
2384 * it is not safe to continue further.
2385 */
2386 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
2387 err = -EINVAL;
2388 goto out;
2389 }
2390
38f791a4 2391 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
0535a3e2 2392 if (!hyp_pgd) {
d5d8184d 2393 kvm_err("Hyp mode PGD not allocated\n");
2fb41059
MZ
2394 err = -ENOMEM;
2395 goto out;
2396 }
2397
0535a3e2
MZ
2398 if (__kvm_cpu_uses_extended_idmap()) {
2399 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2400 hyp_pgd_order);
2401 if (!boot_hyp_pgd) {
2402 kvm_err("Hyp boot PGD not allocated\n");
2403 err = -ENOMEM;
2404 goto out;
2405 }
2fb41059 2406
0535a3e2
MZ
2407 err = kvm_map_idmap_text(boot_hyp_pgd);
2408 if (err)
2409 goto out;
d5d8184d 2410
e4c5a685
AB
2411 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2412 if (!merged_hyp_pgd) {
2413 kvm_err("Failed to allocate extra HYP pgd\n");
 2413 			err = -ENOMEM;
 2414 			goto out;
2415 }
2416 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2417 hyp_idmap_start);
0535a3e2
MZ
2418 } else {
2419 err = kvm_map_idmap_text(hyp_pgd);
2420 if (err)
2421 goto out;
5a677ce0
MZ
2422 }
2423
e3f019b3 2424 io_map_base = hyp_idmap_start;
d5d8184d 2425 return 0;
2fb41059 2426out:
4f728276 2427 free_hyp_pgds();
2fb41059 2428 return err;
342cd0ab 2429}
df6ce24f
EA
2430
2431void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 2432 const struct kvm_userspace_memory_region *mem,
9d4c197c 2433 struct kvm_memory_slot *old,
f36f3f28 2434 const struct kvm_memory_slot *new,
df6ce24f
EA
2435 enum kvm_mr_change change)
2436{
c6473555
MS
2437 /*
 2438 * At this point the memslot has been committed and there is an
656012c7 2439 * allocated dirty_bitmap[]; dirty pages will be tracked while the
c6473555
MS
2440 * memory slot is write protected.
2441 */
c862626e
KZ
2442 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2443 /*
2444 * If we're with initial-all-set, we don't need to write
2445 * protect any pages because they're all reported as dirty.
 2447 * Huge pages and normal pages will be write protected gradually.
2447 */
2448 if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
2449 kvm_mmu_wp_memory_region(kvm, mem->slot);
2450 }
2451 }
df6ce24f
EA
2452}
2453
2454int kvm_arch_prepare_memory_region(struct kvm *kvm,
2455 struct kvm_memory_slot *memslot,
09170a49 2456 const struct kvm_userspace_memory_region *mem,
df6ce24f
EA
2457 enum kvm_mr_change change)
2458{
8eef9123
AB
2459 hva_t hva = mem->userspace_addr;
2460 hva_t reg_end = hva + mem->memory_size;
2461 bool writable = !(mem->flags & KVM_MEM_READONLY);
2462 int ret = 0;
2463
15a49a44
MS
2464 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2465 change != KVM_MR_FLAGS_ONLY)
8eef9123
AB
2466 return 0;
2467
c3058d5d
CD
2468 /*
2469 * Prevent userspace from creating a memory region outside of the IPA
 2470 * space addressable by the KVM guest.
2471 */
2472 if (memslot->base_gfn + memslot->npages >=
e55cac5b 2473 (kvm_phys_size(kvm) >> PAGE_SHIFT))
c3058d5d
CD
2474 return -EFAULT;
2475
89154dd5 2476 mmap_read_lock(current->mm);
8eef9123
AB
2477 /*
2478 * A memory region could potentially cover multiple VMAs, and any holes
2479 * between them, so iterate over all of them to find out if we can map
2480 * any of them right now.
2481 *
2482 * +--------------------------------------------+
2483 * +---------------+----------------+ +----------------+
2484 * | : VMA 1 | VMA 2 | | VMA 3 : |
2485 * +---------------+----------------+ +----------------+
2486 * | memory region |
2487 * +--------------------------------------------+
2488 */
2489 do {
2490 struct vm_area_struct *vma = find_vma(current->mm, hva);
2491 hva_t vm_start, vm_end;
2492
2493 if (!vma || vma->vm_start >= reg_end)
2494 break;
2495
8eef9123
AB
2496 /*
2497 * Take the intersection of this VMA with the memory region
2498 */
2499 vm_start = max(hva, vma->vm_start);
2500 vm_end = min(reg_end, vma->vm_end);
2501
2502 if (vma->vm_flags & VM_PFNMAP) {
2503 gpa_t gpa = mem->guest_phys_addr +
2504 (vm_start - mem->userspace_addr);
ca09f02f
MM
2505 phys_addr_t pa;
2506
2507 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2508 pa += vm_start - vma->vm_start;
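			/*
			 * e.g. (hypothetical VMA): vm_pgoff 0x100 maps
			 * physical 0x100000 at vm_start; if the memslot
			 * intersection begins 0x3000 into the VMA, pa
			 * resolves to 0x103000, which is then mapped into
			 * the guest by kvm_phys_addr_ioremap() below.
			 */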
8eef9123 2509
15a49a44 2510 /* IO region dirty page logging not allowed */
72f31048
MZ
2511 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2512 ret = -EINVAL;
2513 goto out;
2514 }
15a49a44 2515
8eef9123
AB
2516 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2517 vm_end - vm_start,
2518 writable);
2519 if (ret)
2520 break;
2521 }
2522 hva = vm_end;
2523 } while (hva < reg_end);
2524
15a49a44 2525 if (change == KVM_MR_FLAGS_ONLY)
72f31048 2526 goto out;
15a49a44 2527
849260c7
AB
2528 spin_lock(&kvm->mmu_lock);
2529 if (ret)
a0e50aa3 2530 unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
849260c7
AB
2531 else
2532 stage2_flush_memslot(kvm, memslot);
2533 spin_unlock(&kvm->mmu_lock);
72f31048 2534out:
89154dd5 2535 mmap_read_unlock(current->mm);
8eef9123 2536 return ret;
df6ce24f
EA
2537}
2538
e96c81ee 2539void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
df6ce24f
EA
2540{
2541}
2542
15248258 2543void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
df6ce24f
EA
2544{
2545}
2546
2547void kvm_arch_flush_shadow_all(struct kvm *kvm)
2548{
a0e50aa3 2549 kvm_free_stage2_pgd(&kvm->arch.mmu);
df6ce24f
EA
2550}
2551
2552void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2553 struct kvm_memory_slot *slot)
2554{
8eef9123
AB
2555 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2556 phys_addr_t size = slot->npages << PAGE_SHIFT;
2557
2558 spin_lock(&kvm->mmu_lock);
a0e50aa3 2559 unmap_stage2_range(&kvm->arch.mmu, gpa, size);
8eef9123 2560 spin_unlock(&kvm->mmu_lock);
df6ce24f 2561}
3c1e7165
MZ
2562
2563/*
2564 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2565 *
2566 * Main problems:
2567 * - S/W ops are local to a CPU (not broadcast)
2568 * - We have line migration behind our back (speculation)
2569 * - System caches don't support S/W at all (damn!)
2570 *
2571 * In the face of the above, the best we can do is to try and convert
2572 * S/W ops to VA ops. Because the guest is not allowed to infer the
2573 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2574 * which is a rather good thing for us.
2575 *
2576 * Also, it is only used when turning caches on/off ("The expected
2577 * usage of the cache maintenance instructions that operate by set/way
2578 * is associated with the cache maintenance instructions associated
2579 * with the powerdown and powerup of caches, if this is required by
2580 * the implementation.").
2581 *
2582 * We use the following policy:
2583 *
2584 * - If we trap a S/W operation, we enable VM trapping to detect
2585 * caches being turned on/off, and do a full clean.
2586 *
 2587 * - We flush the caches when they are turned either on or off.
2588 *
2589 * - Once the caches are enabled, we stop trapping VM ops.
2590 */
2591void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2592{
3df59d8d 2593 unsigned long hcr = *vcpu_hcr(vcpu);
3c1e7165
MZ
2594
2595 /*
2596 * If this is the first time we do a S/W operation
 2597 * (i.e. HCR_TVM not set) flush the whole of guest memory, and
 2598 * enable VM trapping.
2599 *
2600 * Otherwise, rely on the VM trapping to wait for the MMU +
2601 * Caches to be turned off. At that point, we'll be able to
2602 * clean the caches again.
2603 */
2604 if (!(hcr & HCR_TVM)) {
2605 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2606 vcpu_has_cache_enabled(vcpu));
2607 stage2_flush_vm(vcpu->kvm);
3df59d8d 2608 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
3c1e7165
MZ
2609 }
2610}
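/*
 * Illustrative sequence under the policy above: the guest issues a
 * set/way op with caches on, so we flush the whole VM and set HCR_TVM;
 * it later clears SCTLR_EL1.C, which now traps into kvm_toggle_cache()
 * below, flushing again and dropping HCR_TVM only once the caches are
 * re-enabled.
 */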
2611
2612void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2613{
2614 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2615
2616 /*
2617 * If switching the MMU+caches on, need to invalidate the caches.
2618 * If switching it off, need to clean the caches.
2619 * Clean + invalidate does the trick always.
2620 */
2621 if (now_enabled != was_enabled)
2622 stage2_flush_vm(vcpu->kvm);
2623
2624 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2625 if (now_enabled)
3df59d8d 2626 *vcpu_hcr(vcpu) &= ~HCR_TVM;
3c1e7165
MZ
2627
2628 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2629}