arch/arm64/kvm/hyp/pgtable.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
4  * No bombay mix was harmed in the writing of this file.
5  *
6  * Copyright (C) 2020 Google LLC
7  * Author: Will Deacon <will@kernel.org>
8  */
9
10 #include <linux/bitfield.h>
11 #include <asm/kvm_pgtable.h>
12 #include <asm/stage2_pgtable.h>
13
14
15 #define KVM_PTE_TYPE                    BIT(1)
16 #define KVM_PTE_TYPE_BLOCK              0
17 #define KVM_PTE_TYPE_PAGE               1
18 #define KVM_PTE_TYPE_TABLE              1
19
20 #define KVM_PTE_LEAF_ATTR_LO            GENMASK(11, 2)
21
22 #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
23 #define KVM_PTE_LEAF_ATTR_LO_S1_AP      GENMASK(7, 6)
24 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO           \
25         ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
26 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW           \
27         ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
28 #define KVM_PTE_LEAF_ATTR_LO_S1_SH      GENMASK(9, 8)
29 #define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS   3
30 #define KVM_PTE_LEAF_ATTR_LO_S1_AF      BIT(10)
31
32 #define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
33 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R  BIT(6)
34 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W  BIT(7)
35 #define KVM_PTE_LEAF_ATTR_LO_S2_SH      GENMASK(9, 8)
36 #define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS   3
37 #define KVM_PTE_LEAF_ATTR_LO_S2_AF      BIT(10)
38
39 #define KVM_PTE_LEAF_ATTR_HI            GENMASK(63, 50)
40
41 #define KVM_PTE_LEAF_ATTR_HI_SW         GENMASK(58, 55)
42
43 #define KVM_PTE_LEAF_ATTR_HI_S1_XN      BIT(54)
44
45 #define KVM_PTE_LEAF_ATTR_HI_S2_XN      BIT(54)
46
47 #define KVM_PTE_LEAF_ATTR_HI_S1_GP      BIT(50)
48
49 #define KVM_PTE_LEAF_ATTR_S2_PERMS      (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
50                                          KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
51                                          KVM_PTE_LEAF_ATTR_HI_S2_XN)
52
53 #define KVM_INVALID_PTE_OWNER_MASK      GENMASK(9, 2)
54 #define KVM_MAX_OWNER_ID                1
55
56 /*
57  * Used to indicate a pte for which a 'break-before-make' sequence is in
58  * progress.
59  */
60 #define KVM_INVALID_PTE_LOCKED          BIT(10)
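/*
 * The full sequence in this file is: stage2_try_break_pte() replaces the
 * old entry with KVM_INVALID_PTE_LOCKED and performs any required TLB
 * invalidation, after which stage2_make_pte() publishes the new entry
 * with a release store. A shared walker that observes the LOCKED marker
 * backs off, typically returning -EAGAIN.
 */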
61
62 struct kvm_pgtable_walk_data {
63         struct kvm_pgtable_walker       *walker;
64
65         const u64                       start;
66         u64                             addr;
67         const u64                       end;
68 };
69
70 static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
71 {
72         return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
73 }
74
75 static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
76 {
77         return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
78 }
79
80 static bool kvm_phys_is_valid(u64 phys)
81 {
82         u64 parange_max = kvm_get_parange_max();
83         u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
84
85         return phys < BIT(shift);
86 }
87
88 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
89 {
90         u64 granule = kvm_granule_size(ctx->level);
91
92         if (!kvm_level_supports_block_mapping(ctx->level))
93                 return false;
94
95         if (granule > (ctx->end - ctx->addr))
96                 return false;
97
98         if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
99                 return false;
100
101         return IS_ALIGNED(ctx->addr, granule);
102 }
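/*
 * For example, with 4K pages a level-2 block covers 2MiB and a level-1
 * block covers 1GiB, so a level-2 block mapping is only used when at
 * least 2MiB of the range remains and both the input address and (if
 * valid) the physical address are 2MiB-aligned.
 */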
103
104 static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level)
105 {
106         u64 shift = kvm_granule_shift(level);
107         u64 mask = BIT(PAGE_SHIFT - 3) - 1;
108
109         return (data->addr >> shift) & mask;
110 }
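/*
 * Each level resolves PAGE_SHIFT - 3 bits of the input address; with 4K
 * pages, for example, that is 9 bits per level (mask 0x1ff) and the
 * level-3 index of an address is (addr >> 12) & 0x1ff.
 */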
111
112 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
113 {
114         u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
115         u64 mask = BIT(pgt->ia_bits) - 1;
116
117         return (addr & mask) >> shift;
118 }
119
120 static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level)
121 {
122         struct kvm_pgtable pgt = {
123                 .ia_bits        = ia_bits,
124                 .start_level    = start_level,
125         };
126
127         return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
128 }
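/*
 * The start level may need to resolve more than PAGE_SHIFT - 3 bits, in
 * which case the initial lookup uses concatenated pages. For example,
 * with 4K pages, ia_bits = 40 and start_level = 1, bits [39:30] index
 * the first level, so the PGD consists of 2 concatenated pages.
 */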
129
130 static bool kvm_pte_table(kvm_pte_t pte, s8 level)
131 {
132         if (level == KVM_PGTABLE_LAST_LEVEL)
133                 return false;
134
135         if (!kvm_pte_valid(pte))
136                 return false;
137
138         return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
139 }
140
141 static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
142 {
143         return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
144 }
145
146 static void kvm_clear_pte(kvm_pte_t *ptep)
147 {
148         WRITE_ONCE(*ptep, 0);
149 }
150
151 static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
152 {
153         kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
154
155         pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
156         pte |= KVM_PTE_VALID;
157         return pte;
158 }
159
160 static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
161 {
162         kvm_pte_t pte = kvm_phys_to_pte(pa);
163         u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
164                                                        KVM_PTE_TYPE_BLOCK;
165
166         pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
167         pte |= FIELD_PREP(KVM_PTE_TYPE, type);
168         pte |= KVM_PTE_VALID;
169
170         return pte;
171 }
172
173 static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
174 {
175         return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
176 }
177
178 static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
179                                   const struct kvm_pgtable_visit_ctx *ctx,
180                                   enum kvm_pgtable_walk_flags visit)
181 {
182         struct kvm_pgtable_walker *walker = data->walker;
183
184         /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
185         WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
186         return walker->cb(ctx, visit);
187 }
188
189 static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
190                                       int r)
191 {
192         /*
193          * Visitor callbacks return EAGAIN when the conditions that led to a
194          * fault are no longer reflected in the page tables due to a race to
195          * update a PTE. In the context of a fault handler this is interpreted
196          * as a signal to retry guest execution.
197          *
198          * Ignore the return code altogether for walkers outside a fault handler
199          * (e.g. write protecting a range of memory) and chug along with the
200          * page table walk.
201          */
202         if (r == -EAGAIN)
203                 return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
204
205         return !r;
206 }
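/*
 * For example, stage2_attr_walker() returns -EAGAIN on an invalid PTE:
 * kvm_pgtable_stage2_relax_perms() walks with KVM_PGTABLE_WALK_HANDLE_FAULT
 * and so propagates the error back to the fault handler, whereas
 * kvm_pgtable_stage2_wrprotect() ignores it and keeps walking the range.
 */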
207
208 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
209                               struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);
210
211 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
212                                       struct kvm_pgtable_mm_ops *mm_ops,
213                                       kvm_pteref_t pteref, s8 level)
214 {
215         enum kvm_pgtable_walk_flags flags = data->walker->flags;
216         kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
217         struct kvm_pgtable_visit_ctx ctx = {
218                 .ptep   = ptep,
219                 .old    = READ_ONCE(*ptep),
220                 .arg    = data->walker->arg,
221                 .mm_ops = mm_ops,
222                 .start  = data->start,
223                 .addr   = data->addr,
224                 .end    = data->end,
225                 .level  = level,
226                 .flags  = flags,
227         };
228         int ret = 0;
229         bool reload = false;
230         kvm_pteref_t childp;
231         bool table = kvm_pte_table(ctx.old, level);
232
233         if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
234                 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
235                 reload = true;
236         }
237
238         if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
239                 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
240                 reload = true;
241         }
242
243         /*
244          * Reload the page table after invoking the walker callback for leaf
245          * entries or after pre-order traversal, to allow the walker to descend
246          * into a newly installed or replaced table.
247          */
248         if (reload) {
249                 ctx.old = READ_ONCE(*ptep);
250                 table = kvm_pte_table(ctx.old, level);
251         }
252
253         if (!kvm_pgtable_walk_continue(data->walker, ret))
254                 goto out;
255
256         if (!table) {
257                 data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
258                 data->addr += kvm_granule_size(level);
259                 goto out;
260         }
261
262         childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
263         ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
264         if (!kvm_pgtable_walk_continue(data->walker, ret))
265                 goto out;
266
267         if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
268                 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
269
270 out:
271         if (kvm_pgtable_walk_continue(data->walker, ret))
272                 return 0;
273
274         return ret;
275 }
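/*
 * A table entry is thus visited at most twice: with TABLE_PRE before
 * descending into the child table and with TABLE_POST once the descent
 * returns. Leaf and invalid entries are visited once, with LEAF, after
 * which the walk advances by one granule at the current level.
 */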
276
277 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
278                               struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
279 {
280         u32 idx;
281         int ret = 0;
282
283         if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
284                          level > KVM_PGTABLE_LAST_LEVEL))
285                 return -EINVAL;
286
287         for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
288                 kvm_pteref_t pteref = &pgtable[idx];
289
290                 if (data->addr >= data->end)
291                         break;
292
293                 ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
294                 if (ret)
295                         break;
296         }
297
298         return ret;
299 }
300
301 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
302 {
303         u32 idx;
304         int ret = 0;
305         u64 limit = BIT(pgt->ia_bits);
306
307         if (data->addr > limit || data->end > limit)
308                 return -ERANGE;
309
310         if (!pgt->pgd)
311                 return -EINVAL;
312
313         for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
314                 kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
315
316                 ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
317                 if (ret)
318                         break;
319         }
320
321         return ret;
322 }
323
324 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
325                      struct kvm_pgtable_walker *walker)
326 {
327         struct kvm_pgtable_walk_data walk_data = {
328                 .start  = ALIGN_DOWN(addr, PAGE_SIZE),
329                 .addr   = ALIGN_DOWN(addr, PAGE_SIZE),
330                 .end    = PAGE_ALIGN(walk_data.addr + size),
331                 .walker = walker,
332         };
333         int r;
334
335         r = kvm_pgtable_walk_begin(walker);
336         if (r)
337                 return r;
338
339         r = _kvm_pgtable_walk(pgt, &walk_data);
340         kvm_pgtable_walk_end(walker);
341
342         return r;
343 }
344
345 struct leaf_walk_data {
346         kvm_pte_t       pte;
347         s8              level;
348 };
349
350 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
351                        enum kvm_pgtable_walk_flags visit)
352 {
353         struct leaf_walk_data *data = ctx->arg;
354
355         data->pte   = ctx->old;
356         data->level = ctx->level;
357
358         return 0;
359 }
360
361 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
362                          kvm_pte_t *ptep, s8 *level)
363 {
364         struct leaf_walk_data data;
365         struct kvm_pgtable_walker walker = {
366                 .cb     = leaf_walker,
367                 .flags  = KVM_PGTABLE_WALK_LEAF,
368                 .arg    = &data,
369         };
370         int ret;
371
372         ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
373                                PAGE_SIZE, &walker);
374         if (!ret) {
375                 if (ptep)
376                         *ptep  = data.pte;
377                 if (level)
378                         *level = data.level;
379         }
380
381         return ret;
382 }
383
384 struct hyp_map_data {
385         const u64                       phys;
386         kvm_pte_t                       attr;
387 };
388
389 static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
390 {
391         bool device = prot & KVM_PGTABLE_PROT_DEVICE;
392         u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
393         kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
394         u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
395         u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
396                                                KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;
397
398         if (!(prot & KVM_PGTABLE_PROT_R))
399                 return -EINVAL;
400
401         if (prot & KVM_PGTABLE_PROT_X) {
402                 if (prot & KVM_PGTABLE_PROT_W)
403                         return -EINVAL;
404
405                 if (device)
406                         return -EINVAL;
407
408                 if (system_supports_bti_kernel())
409                         attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
410         } else {
411                 attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
412         }
413
414         attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
415         if (!kvm_lpa2_is_enabled())
416                 attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
417         attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
418         attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
419         *ptep = attr;
420
421         return 0;
422 }
423
424 enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
425 {
426         enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
427         u32 ap;
428
429         if (!kvm_pte_valid(pte))
430                 return prot;
431
432         if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
433                 prot |= KVM_PGTABLE_PROT_X;
434
435         ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
436         if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
437                 prot |= KVM_PGTABLE_PROT_R;
438         else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
439                 prot |= KVM_PGTABLE_PROT_RW;
440
441         return prot;
442 }
443
444 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
445                                     struct hyp_map_data *data)
446 {
447         u64 phys = data->phys + (ctx->addr - ctx->start);
448         kvm_pte_t new;
449
450         if (!kvm_block_mapping_supported(ctx, phys))
451                 return false;
452
453         new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
454         if (ctx->old == new)
455                 return true;
456         if (!kvm_pte_valid(ctx->old))
457                 ctx->mm_ops->get_page(ctx->ptep);
458         else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
459                 return false;
460
461         smp_store_release(ctx->ptep, new);
462         return true;
463 }
464
465 static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
466                           enum kvm_pgtable_walk_flags visit)
467 {
468         kvm_pte_t *childp, new;
469         struct hyp_map_data *data = ctx->arg;
470         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
471
472         if (hyp_map_walker_try_leaf(ctx, data))
473                 return 0;
474
475         if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
476                 return -EINVAL;
477
478         childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
479         if (!childp)
480                 return -ENOMEM;
481
482         new = kvm_init_table_pte(childp, mm_ops);
483         mm_ops->get_page(ctx->ptep);
484         smp_store_release(ctx->ptep, new);
485
486         return 0;
487 }
488
489 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
490                         enum kvm_pgtable_prot prot)
491 {
492         int ret;
493         struct hyp_map_data map_data = {
494                 .phys   = ALIGN_DOWN(phys, PAGE_SIZE),
495         };
496         struct kvm_pgtable_walker walker = {
497                 .cb     = hyp_map_walker,
498                 .flags  = KVM_PGTABLE_WALK_LEAF,
499                 .arg    = &map_data,
500         };
501
502         ret = hyp_set_prot_attr(prot, &map_data.attr);
503         if (ret)
504                 return ret;
505
506         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
507         dsb(ishst);
508         isb();
509         return ret;
510 }
511
512 static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
513                             enum kvm_pgtable_walk_flags visit)
514 {
515         kvm_pte_t *childp = NULL;
516         u64 granule = kvm_granule_size(ctx->level);
517         u64 *unmapped = ctx->arg;
518         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
519
520         if (!kvm_pte_valid(ctx->old))
521                 return -EINVAL;
522
523         if (kvm_pte_table(ctx->old, ctx->level)) {
524                 childp = kvm_pte_follow(ctx->old, mm_ops);
525
526                 if (mm_ops->page_count(childp) != 1)
527                         return 0;
528
529                 kvm_clear_pte(ctx->ptep);
530                 dsb(ishst);
531                 __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
532         } else {
533                 if (ctx->end - ctx->addr < granule)
534                         return -EINVAL;
535
536                 kvm_clear_pte(ctx->ptep);
537                 dsb(ishst);
538                 __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
539                 *unmapped += granule;
540         }
541
542         dsb(ish);
543         isb();
544         mm_ops->put_page(ctx->ptep);
545
546         if (childp)
547                 mm_ops->put_page(childp);
548
549         return 0;
550 }
551
552 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
553 {
554         u64 unmapped = 0;
555         struct kvm_pgtable_walker walker = {
556                 .cb     = hyp_unmap_walker,
557                 .arg    = &unmapped,
558                 .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
559         };
560
561         if (!pgt->mm_ops->page_count)
562                 return 0;
563
564         kvm_pgtable_walk(pgt, addr, size, &walker);
565         return unmapped;
566 }
567
568 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
569                          struct kvm_pgtable_mm_ops *mm_ops)
570 {
571         s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
572                          ARM64_HW_PGTABLE_LEVELS(va_bits);
573
574         if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
575             start_level > KVM_PGTABLE_LAST_LEVEL)
576                 return -EINVAL;
577
578         pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
579         if (!pgt->pgd)
580                 return -ENOMEM;
581
582         pgt->ia_bits            = va_bits;
583         pgt->start_level        = start_level;
584         pgt->mm_ops             = mm_ops;
585         pgt->mmu                = NULL;
586         pgt->force_pte_cb       = NULL;
587
588         return 0;
589 }
590
591 static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
592                            enum kvm_pgtable_walk_flags visit)
593 {
594         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
595
596         if (!kvm_pte_valid(ctx->old))
597                 return 0;
598
599         mm_ops->put_page(ctx->ptep);
600
601         if (kvm_pte_table(ctx->old, ctx->level))
602                 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
603
604         return 0;
605 }
606
607 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
608 {
609         struct kvm_pgtable_walker walker = {
610                 .cb     = hyp_free_walker,
611                 .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
612         };
613
614         WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
615         pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
616         pgt->pgd = NULL;
617 }
618
619 struct stage2_map_data {
620         const u64                       phys;
621         kvm_pte_t                       attr;
622         u8                              owner_id;
623
624         kvm_pte_t                       *anchor;
625         kvm_pte_t                       *childp;
626
627         struct kvm_s2_mmu               *mmu;
628         void                            *memcache;
629
630         /* Force mappings to page granularity */
631         bool                            force_pte;
632 };
633
634 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
635 {
636         u64 vtcr = VTCR_EL2_FLAGS;
637         s8 lvls;
638
639         vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
640         vtcr |= VTCR_EL2_T0SZ(phys_shift);
641         /*
642          * Use a minimum of 2 page-table levels to prevent splitting
643          * host PMD huge pages at stage2.
644          */
645         lvls = stage2_pgtable_levels(phys_shift);
646         if (lvls < 2)
647                 lvls = 2;
648
649         /*
650          * When LPA2 is enabled, the HW supports an extra level of translation
651          * (for 5 in total) when using 4K pages. It also introduces VTCR_EL2.SL2
652          * as an addition to SL0 to enable encoding this extra start level.
653          * However, since we always use concatenated pages for the first level
654          * lookup, we will never need this extra level and therefore do not need
655          * to touch SL2.
656          */
657         vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
658
659 #ifdef CONFIG_ARM64_HW_AFDBM
660         /*
661          * Enable Hardware Access Flag management unconditionally on all
662          * CPUs. In systems that have asymmetric support for the feature,
663          * this allows KVM to leverage hardware support on the subset of cores
664          * that implement the feature.
665          *
666          * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
667          * hardware) on implementations that do not advertise support for the
668          * feature. As such, setting HA unconditionally is safe, unless you
669          * happen to be running on a design that has unadvertised support for
670          * HAFDBS. Here be dragons.
671          */
672         if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
673                 vtcr |= VTCR_EL2_HA;
674 #endif /* CONFIG_ARM64_HW_AFDBM */
675
676         if (kvm_lpa2_is_enabled())
677                 vtcr |= VTCR_EL2_DS;
678
679         /* Set the vmid bits */
680         vtcr |= (get_vmid_bits(mmfr1) == 16) ?
681                 VTCR_EL2_VS_16BIT :
682                 VTCR_EL2_VS_8BIT;
683
684         return vtcr;
685 }
686
687 static bool stage2_has_fwb(struct kvm_pgtable *pgt)
688 {
689         if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
690                 return false;
691
692         return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
693 }
694
695 void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
696                                 phys_addr_t addr, size_t size)
697 {
698         unsigned long pages, inval_pages;
699
700         if (!system_supports_tlb_range()) {
701                 kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
702                 return;
703         }
704
705         pages = size >> PAGE_SHIFT;
706         while (pages > 0) {
707                 inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
708                 kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
709
710                 addr += inval_pages << PAGE_SHIFT;
711                 pages -= inval_pages;
712         }
713 }
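/*
 * For example, a range covering three times MAX_TLBI_RANGE_PAGES is
 * issued as three range-based invalidations, each advancing 'addr' by
 * the pages it covered. Without FEAT_TLBIRANGE the entire VMID is
 * invalidated in a single operation instead.
 */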
714
715 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
716
717 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
718                                 kvm_pte_t *ptep)
719 {
720         kvm_pte_t attr;
721         u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
722
723         switch (prot & (KVM_PGTABLE_PROT_DEVICE |
724                         KVM_PGTABLE_PROT_NORMAL_NC)) {
725         case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
726                 return -EINVAL;
727         case KVM_PGTABLE_PROT_DEVICE:
728                 if (prot & KVM_PGTABLE_PROT_X)
729                         return -EINVAL;
730                 attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
731                 break;
732         case KVM_PGTABLE_PROT_NORMAL_NC:
733                 if (prot & KVM_PGTABLE_PROT_X)
734                         return -EINVAL;
735                 attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
736                 break;
737         default:
738                 attr = KVM_S2_MEMATTR(pgt, NORMAL);
739         }
740
741         if (!(prot & KVM_PGTABLE_PROT_X))
742                 attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
743
744         if (prot & KVM_PGTABLE_PROT_R)
745                 attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
746
747         if (prot & KVM_PGTABLE_PROT_W)
748                 attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
749
750         if (!kvm_lpa2_is_enabled())
751                 attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
752
753         attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
754         attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
755         *ptep = attr;
756
757         return 0;
758 }
759
760 enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
761 {
762         enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
763
764         if (!kvm_pte_valid(pte))
765                 return prot;
766
767         if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
768                 prot |= KVM_PGTABLE_PROT_R;
769         if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
770                 prot |= KVM_PGTABLE_PROT_W;
771         if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
772                 prot |= KVM_PGTABLE_PROT_X;
773
774         return prot;
775 }
776
777 static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
778 {
779         if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
780                 return true;
781
782         return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
783 }
784
785 static bool stage2_pte_is_counted(kvm_pte_t pte)
786 {
787         /*
788          * The refcount tracks valid entries as well as invalid entries if they
789          * encode ownership of a page by an entity other than the page-table
790          * owner, whose id is 0.
791          */
792         return !!pte;
793 }
794
795 static bool stage2_pte_is_locked(kvm_pte_t pte)
796 {
797         return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
798 }
799
800 static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
801 {
802         if (!kvm_pgtable_walk_shared(ctx)) {
803                 WRITE_ONCE(*ctx->ptep, new);
804                 return true;
805         }
806
807         return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
808 }
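/*
 * Exclusive walkers can update the PTE with a plain WRITE_ONCE(), but a
 * shared walker must cmpxchg() against the value read when the PTE was
 * visited (ctx->old); if another walker changed the entry in the
 * meantime, the update is dropped and the caller typically returns
 * -EAGAIN to retry.
 */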
809
810 /**
811  * stage2_try_break_pte() - Invalidates a pte according to the
812  *                          'break-before-make' requirements of the
813  *                          architecture.
814  *
815  * @ctx: context of the visited pte.
816  * @mmu: stage-2 mmu
817  *
818  * Returns: true if the pte was successfully broken.
819  *
820  * If the removed pte was valid, performs the necessary serialization and TLB
821  * invalidation for the old value. For counted ptes, drops the reference count
822  * on the containing table page.
823  */
824 static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
825                                  struct kvm_s2_mmu *mmu)
826 {
827         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
828
829         if (stage2_pte_is_locked(ctx->old)) {
830                 /*
831                  * Should never occur if this walker has exclusive access to the
832                  * page tables.
833                  */
834                 WARN_ON(!kvm_pgtable_walk_shared(ctx));
835                 return false;
836         }
837
838         if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
839                 return false;
840
841         if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
842                 /*
843                  * Perform the appropriate TLB invalidation based on the
844                  * evicted pte value (if any).
845                  */
846                 if (kvm_pte_table(ctx->old, ctx->level))
847                         kvm_tlb_flush_vmid_range(mmu, ctx->addr,
848                                                 kvm_granule_size(ctx->level));
849                 else if (kvm_pte_valid(ctx->old))
850                         kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
851                                      ctx->addr, ctx->level);
852         }
853
854         if (stage2_pte_is_counted(ctx->old))
855                 mm_ops->put_page(ctx->ptep);
856
857         return true;
858 }
859
860 static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
861 {
862         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
863
864         WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
865
866         if (stage2_pte_is_counted(new))
867                 mm_ops->get_page(ctx->ptep);
868
869         smp_store_release(ctx->ptep, new);
870 }
871
872 static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
873 {
874         /*
875          * If FEAT_TLBIRANGE is implemented, defer the individual
876          * TLB invalidations until the entire walk is finished, and
877          * then use the range-based TLBI instructions to do the
878          * invalidations. Condition deferred TLB invalidation on the
879          * system supporting FWB as the optimization is entirely
880          * pointless when the unmap walker needs to perform CMOs.
881          */
882         return system_supports_tlb_range() && stage2_has_fwb(pgt);
883 }
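/*
 * When the flush is deferred, stage2_unmap_put_pte() skips the per-PTE
 * invalidation and kvm_pgtable_stage2_unmap() issues a single
 * kvm_tlb_flush_vmid_range() for the whole range once the walk is done.
 */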
884
885 static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
886                                 struct kvm_s2_mmu *mmu,
887                                 struct kvm_pgtable_mm_ops *mm_ops)
888 {
889         struct kvm_pgtable *pgt = ctx->arg;
890
891         /*
892          * Clear the existing PTE, and perform break-before-make if it was
893          * valid. Depending on the system support, defer the TLB maintenance
894          * for the same until the entire unmap walk is completed.
895          */
896         if (kvm_pte_valid(ctx->old)) {
897                 kvm_clear_pte(ctx->ptep);
898
899                 if (!stage2_unmap_defer_tlb_flush(pgt))
900                         kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
901                                         ctx->addr, ctx->level);
902         }
903
904         mm_ops->put_page(ctx->ptep);
905 }
906
907 static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
908 {
909         u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
910         return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
911 }
912
913 static bool stage2_pte_executable(kvm_pte_t pte)
914 {
915         return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
916 }
917
918 static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
919                                        const struct stage2_map_data *data)
920 {
921         u64 phys = data->phys;
922
923         /*
924          * Stage-2 walks to update ownership data are communicated to the map
925          * walker using an invalid PA. Avoid offsetting an already invalid PA,
926          * which could overflow and make the address valid again.
927          */
928         if (!kvm_phys_is_valid(phys))
929                 return phys;
930
931         /*
932          * Otherwise, work out the correct PA based on how far the walk has
933          * gotten.
934          */
935         return phys + (ctx->addr - ctx->start);
936 }
937
938 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
939                                         struct stage2_map_data *data)
940 {
941         u64 phys = stage2_map_walker_phys_addr(ctx, data);
942
943         if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
944                 return false;
945
946         return kvm_block_mapping_supported(ctx, phys);
947 }
948
949 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
950                                       struct stage2_map_data *data)
951 {
952         kvm_pte_t new;
953         u64 phys = stage2_map_walker_phys_addr(ctx, data);
954         u64 granule = kvm_granule_size(ctx->level);
955         struct kvm_pgtable *pgt = data->mmu->pgt;
956         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
957
958         if (!stage2_leaf_mapping_allowed(ctx, data))
959                 return -E2BIG;
960
961         if (kvm_phys_is_valid(phys))
962                 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
963         else
964                 new = kvm_init_invalid_leaf_owner(data->owner_id);
965
966         /*
967          * Skip updating the PTE if we are trying to recreate the exact
968          * same mapping or only change the access permissions. Instead,
969          * the vCPU will take one more exit from the guest if still needed
970          * and then go through the path of relaxing permissions.
971          */
972         if (!stage2_pte_needs_update(ctx->old, new))
973                 return -EAGAIN;
974
975         if (!stage2_try_break_pte(ctx, data->mmu))
976                 return -EAGAIN;
977
978         /* Perform CMOs before installation of the guest stage-2 PTE */
979         if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
980             stage2_pte_cacheable(pgt, new))
981                 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
982                                                granule);
983
984         if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
985             stage2_pte_executable(new))
986                 mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
987
988         stage2_make_pte(ctx, new);
989
990         return 0;
991 }
992
993 static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
994                                      struct stage2_map_data *data)
995 {
996         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
997         kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
998         int ret;
999
1000         if (!stage2_leaf_mapping_allowed(ctx, data))
1001                 return 0;
1002
1003         ret = stage2_map_walker_try_leaf(ctx, data);
1004         if (ret)
1005                 return ret;
1006
1007         mm_ops->free_unlinked_table(childp, ctx->level);
1008         return 0;
1009 }
1010
1011 static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
1012                                 struct stage2_map_data *data)
1013 {
1014         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1015         kvm_pte_t *childp, new;
1016         int ret;
1017
1018         ret = stage2_map_walker_try_leaf(ctx, data);
1019         if (ret != -E2BIG)
1020                 return ret;
1021
1022         if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
1023                 return -EINVAL;
1024
1025         if (!data->memcache)
1026                 return -ENOMEM;
1027
1028         childp = mm_ops->zalloc_page(data->memcache);
1029         if (!childp)
1030                 return -ENOMEM;
1031
1032         if (!stage2_try_break_pte(ctx, data->mmu)) {
1033                 mm_ops->put_page(childp);
1034                 return -EAGAIN;
1035         }
1036
1037         /*
1038          * If we've run into an existing block mapping then replace it with
1039          * a table. Accesses beyond 'end' that fall within the new table
1040          * will be mapped lazily.
1041          */
1042         new = kvm_init_table_pte(childp, mm_ops);
1043         stage2_make_pte(ctx, new);
1044
1045         return 0;
1046 }
1047
1048 /*
1049  * The TABLE_PRE callback runs for table entries on the way down, looking
1050  * for table entries which we could conceivably replace with a block entry
1051  * for this mapping. If it finds one it replaces the entry and calls
1052  * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
1053  *
1054  * Otherwise, the LEAF callback performs the mapping at the existing leaves
1055  * instead.
1056  */
1057 static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
1058                              enum kvm_pgtable_walk_flags visit)
1059 {
1060         struct stage2_map_data *data = ctx->arg;
1061
1062         switch (visit) {
1063         case KVM_PGTABLE_WALK_TABLE_PRE:
1064                 return stage2_map_walk_table_pre(ctx, data);
1065         case KVM_PGTABLE_WALK_LEAF:
1066                 return stage2_map_walk_leaf(ctx, data);
1067         default:
1068                 return -EINVAL;
1069         }
1070 }
1071
1072 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
1073                            u64 phys, enum kvm_pgtable_prot prot,
1074                            void *mc, enum kvm_pgtable_walk_flags flags)
1075 {
1076         int ret;
1077         struct stage2_map_data map_data = {
1078                 .phys           = ALIGN_DOWN(phys, PAGE_SIZE),
1079                 .mmu            = pgt->mmu,
1080                 .memcache       = mc,
1081                 .force_pte      = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
1082         };
1083         struct kvm_pgtable_walker walker = {
1084                 .cb             = stage2_map_walker,
1085                 .flags          = flags |
1086                                   KVM_PGTABLE_WALK_TABLE_PRE |
1087                                   KVM_PGTABLE_WALK_LEAF,
1088                 .arg            = &map_data,
1089         };
1090
1091         if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
1092                 return -EINVAL;
1093
1094         ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1095         if (ret)
1096                 return ret;
1097
1098         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1099         dsb(ishst);
1100         return ret;
1101 }
1102
1103 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
1104                                  void *mc, u8 owner_id)
1105 {
1106         int ret;
1107         struct stage2_map_data map_data = {
1108                 .phys           = KVM_PHYS_INVALID,
1109                 .mmu            = pgt->mmu,
1110                 .memcache       = mc,
1111                 .owner_id       = owner_id,
1112                 .force_pte      = true,
1113         };
1114         struct kvm_pgtable_walker walker = {
1115                 .cb             = stage2_map_walker,
1116                 .flags          = KVM_PGTABLE_WALK_TABLE_PRE |
1117                                   KVM_PGTABLE_WALK_LEAF,
1118                 .arg            = &map_data,
1119         };
1120
1121         if (owner_id > KVM_MAX_OWNER_ID)
1122                 return -EINVAL;
1123
1124         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1125         return ret;
1126 }
1127
1128 static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
1129                                enum kvm_pgtable_walk_flags visit)
1130 {
1131         struct kvm_pgtable *pgt = ctx->arg;
1132         struct kvm_s2_mmu *mmu = pgt->mmu;
1133         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1134         kvm_pte_t *childp = NULL;
1135         bool need_flush = false;
1136
1137         if (!kvm_pte_valid(ctx->old)) {
1138                 if (stage2_pte_is_counted(ctx->old)) {
1139                         kvm_clear_pte(ctx->ptep);
1140                         mm_ops->put_page(ctx->ptep);
1141                 }
1142                 return 0;
1143         }
1144
1145         if (kvm_pte_table(ctx->old, ctx->level)) {
1146                 childp = kvm_pte_follow(ctx->old, mm_ops);
1147
1148                 if (mm_ops->page_count(childp) != 1)
1149                         return 0;
1150         } else if (stage2_pte_cacheable(pgt, ctx->old)) {
1151                 need_flush = !stage2_has_fwb(pgt);
1152         }
1153
1154         /*
1155          * This is similar to the map() path in that we unmap the entire
1156          * block entry and rely on the remaining portions being faulted
1157          * back lazily.
1158          */
1159         stage2_unmap_put_pte(ctx, mmu, mm_ops);
1160
1161         if (need_flush && mm_ops->dcache_clean_inval_poc)
1162                 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1163                                                kvm_granule_size(ctx->level));
1164
1165         if (childp)
1166                 mm_ops->put_page(childp);
1167
1168         return 0;
1169 }
1170
1171 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
1172 {
1173         int ret;
1174         struct kvm_pgtable_walker walker = {
1175                 .cb     = stage2_unmap_walker,
1176                 .arg    = pgt,
1177                 .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
1178         };
1179
1180         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1181         if (stage2_unmap_defer_tlb_flush(pgt))
1182                 /* Perform the deferred TLB invalidations */
1183                 kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
1184
1185         return ret;
1186 }
1187
1188 struct stage2_attr_data {
1189         kvm_pte_t                       attr_set;
1190         kvm_pte_t                       attr_clr;
1191         kvm_pte_t                       pte;
1192         s8                              level;
1193 };
1194
1195 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
1196                               enum kvm_pgtable_walk_flags visit)
1197 {
1198         kvm_pte_t pte = ctx->old;
1199         struct stage2_attr_data *data = ctx->arg;
1200         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1201
1202         if (!kvm_pte_valid(ctx->old))
1203                 return -EAGAIN;
1204
1205         data->level = ctx->level;
1206         data->pte = pte;
1207         pte &= ~data->attr_clr;
1208         pte |= data->attr_set;
1209
1210         /*
1211          * We may race with the CPU trying to set the access flag here,
1212          * but worst-case the access flag update gets lost and will be
1213          * set on the next access instead.
1214          */
1215         if (data->pte != pte) {
1216                 /*
1217                  * Invalidate instruction cache before updating the guest
1218                  * stage-2 PTE if we are going to add executable permission.
1219                  */
1220                 if (mm_ops->icache_inval_pou &&
1221                     stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
1222                         mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
1223                                                   kvm_granule_size(ctx->level));
1224
1225                 if (!stage2_try_set_pte(ctx, pte))
1226                         return -EAGAIN;
1227         }
1228
1229         return 0;
1230 }
1231
1232 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
1233                                     u64 size, kvm_pte_t attr_set,
1234                                     kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
1235                                     s8 *level, enum kvm_pgtable_walk_flags flags)
1236 {
1237         int ret;
1238         kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
1239         struct stage2_attr_data data = {
1240                 .attr_set       = attr_set & attr_mask,
1241                 .attr_clr       = attr_clr & attr_mask,
1242         };
1243         struct kvm_pgtable_walker walker = {
1244                 .cb             = stage2_attr_walker,
1245                 .arg            = &data,
1246                 .flags          = flags | KVM_PGTABLE_WALK_LEAF,
1247         };
1248
1249         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1250         if (ret)
1251                 return ret;
1252
1253         if (orig_pte)
1254                 *orig_pte = data.pte;
1255
1256         if (level)
1257                 *level = data.level;
1258         return 0;
1259 }
1260
1261 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
1262 {
1263         return stage2_update_leaf_attrs(pgt, addr, size, 0,
1264                                         KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
1265                                         NULL, NULL, 0);
1266 }
1267
1268 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
1269 {
1270         kvm_pte_t pte = 0;
1271         int ret;
1272
1273         ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
1274                                        &pte, NULL,
1275                                        KVM_PGTABLE_WALK_HANDLE_FAULT |
1276                                        KVM_PGTABLE_WALK_SHARED);
1277         if (!ret)
1278                 dsb(ishst);
1279
1280         return pte;
1281 }
1282
1283 struct stage2_age_data {
1284         bool    mkold;
1285         bool    young;
1286 };
1287
1288 static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
1289                              enum kvm_pgtable_walk_flags visit)
1290 {
1291         kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
1292         struct stage2_age_data *data = ctx->arg;
1293
1294         if (!kvm_pte_valid(ctx->old) || new == ctx->old)
1295                 return 0;
1296
1297         data->young = true;
1298
1299         /*
1300          * stage2_age_walker() is always called while holding the MMU lock for
1301          * write, so this will always succeed. Nonetheless, this deliberately
1302          * follows the race detection pattern of the other stage-2 walkers in
1303          * case the locking mechanics of the MMU notifiers is ever changed.
1304          */
1305         if (data->mkold && !stage2_try_set_pte(ctx, new))
1306                 return -EAGAIN;
1307
1308         /*
1309          * "But where's the TLBI?!", you scream.
1310          * "Over in the core code", I sigh.
1311          *
1312          * See the '->clear_flush_young()' callback on the KVM mmu notifier.
1313          */
1314         return 0;
1315 }
1316
1317 bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
1318                                          u64 size, bool mkold)
1319 {
1320         struct stage2_age_data data = {
1321                 .mkold          = mkold,
1322         };
1323         struct kvm_pgtable_walker walker = {
1324                 .cb             = stage2_age_walker,
1325                 .arg            = &data,
1326                 .flags          = KVM_PGTABLE_WALK_LEAF,
1327         };
1328
1329         WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
1330         return data.young;
1331 }
1332
1333 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
1334                                    enum kvm_pgtable_prot prot)
1335 {
1336         int ret;
1337         s8 level;
1338         kvm_pte_t set = 0, clr = 0;
1339
1340         if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
1341                 return -EINVAL;
1342
1343         if (prot & KVM_PGTABLE_PROT_R)
1344                 set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
1345
1346         if (prot & KVM_PGTABLE_PROT_W)
1347                 set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
1348
1349         if (prot & KVM_PGTABLE_PROT_X)
1350                 clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
1351
1352         ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
1353                                        KVM_PGTABLE_WALK_HANDLE_FAULT |
1354                                        KVM_PGTABLE_WALK_SHARED);
1355         if (!ret || ret == -EAGAIN)
1356                 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
1357         return ret;
1358 }
1359
1360 static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
1361                                enum kvm_pgtable_walk_flags visit)
1362 {
1363         struct kvm_pgtable *pgt = ctx->arg;
1364         struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1365
1366         if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
1367                 return 0;
1368
1369         if (mm_ops->dcache_clean_inval_poc)
1370                 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1371                                                kvm_granule_size(ctx->level));
1372         return 0;
1373 }
1374
1375 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
1376 {
1377         struct kvm_pgtable_walker walker = {
1378                 .cb     = stage2_flush_walker,
1379                 .flags  = KVM_PGTABLE_WALK_LEAF,
1380                 .arg    = pgt,
1381         };
1382
1383         if (stage2_has_fwb(pgt))
1384                 return 0;
1385
1386         return kvm_pgtable_walk(pgt, addr, size, &walker);
1387 }
1388
1389 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
1390                                               u64 phys, s8 level,
1391                                               enum kvm_pgtable_prot prot,
1392                                               void *mc, bool force_pte)
1393 {
1394         struct stage2_map_data map_data = {
1395                 .phys           = phys,
1396                 .mmu            = pgt->mmu,
1397                 .memcache       = mc,
1398                 .force_pte      = force_pte,
1399         };
1400         struct kvm_pgtable_walker walker = {
1401                 .cb             = stage2_map_walker,
1402                 .flags          = KVM_PGTABLE_WALK_LEAF |
1403                                   KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
1404                                   KVM_PGTABLE_WALK_SKIP_CMO,
1405                 .arg            = &map_data,
1406         };
1407         /*
1408          * The input address (.addr) is irrelevant for walking an
1409          * unlinked table. Construct an arbitrary IA range to map
1410          * kvm_granule_size(level) worth of memory.
1411          */
1412         struct kvm_pgtable_walk_data data = {
1413                 .walker = &walker,
1414                 .addr   = 0,
1415                 .end    = kvm_granule_size(level),
1416         };
1417         struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1418         kvm_pte_t *pgtable;
1419         int ret;
1420
1421         if (!IS_ALIGNED(phys, kvm_granule_size(level)))
1422                 return ERR_PTR(-EINVAL);
1423
1424         ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1425         if (ret)
1426                 return ERR_PTR(ret);
1427
1428         pgtable = mm_ops->zalloc_page(mc);
1429         if (!pgtable)
1430                 return ERR_PTR(-ENOMEM);
1431
1432         ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
1433                                  level + 1);
1434         if (ret) {
1435                 kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
1436                 return ERR_PTR(ret);
1437         }
1438
1439         return pgtable;
1440 }
1441
1442 /*
1443  * Get the number of page-tables needed to replace a block with a
1444  * fully populated tree up to the PTE entries. Note that @level is
1445  * interpreted as in "level @level entry".
1446  */
1447 static int stage2_block_get_nr_page_tables(s8 level)
1448 {
1449         switch (level) {
1450         case 1:
1451                 return PTRS_PER_PTE + 1;
1452         case 2:
1453                 return 1;
1454         case 3:
1455                 return 0;
1456         default:
1457                 WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
1458                              level > KVM_PGTABLE_LAST_LEVEL);
1459                 return -EINVAL;
1460         };
1461 }
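/*
 * For example, with 4K pages, splitting a level-1 (1GiB) block down to
 * PTEs needs one level-2 table plus PTRS_PER_PTE (512) level-3 tables,
 * hence PTRS_PER_PTE + 1 pages; a level-2 (2MiB) block only needs the
 * single level-3 table that replaces it.
 */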
1462
1463 static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
1464                                enum kvm_pgtable_walk_flags visit)
1465 {
1466         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1467         struct kvm_mmu_memory_cache *mc = ctx->arg;
1468         struct kvm_s2_mmu *mmu;
1469         kvm_pte_t pte = ctx->old, new, *childp;
1470         enum kvm_pgtable_prot prot;
1471         s8 level = ctx->level;
1472         bool force_pte;
1473         int nr_pages;
1474         u64 phys;
1475
1476         /* No huge-pages exist at the last level */
1477         if (level == KVM_PGTABLE_LAST_LEVEL)
1478                 return 0;
1479
1480         /* We only split valid block mappings */
1481         if (!kvm_pte_valid(pte))
1482                 return 0;
1483
1484         nr_pages = stage2_block_get_nr_page_tables(level);
1485         if (nr_pages < 0)
1486                 return nr_pages;
1487
1488         if (mc->nobjs >= nr_pages) {
1489                 /* Build a tree mapped down to the PTE granularity. */
1490                 force_pte = true;
1491         } else {
1492                 /*
1493                  * Don't force PTEs, so create_unlinked() below does
1494                  * not populate the tree up to the PTE level. The
1495                  * consequence is that the call will require a single
1496                  * page of level 2 entries at level 1, or a single
1497                  * page of PTEs at level 2. If we are at level 1, the
1498                  * PTEs will be created recursively.
1499                  */
1500                 force_pte = false;
1501                 nr_pages = 1;
1502         }
1503
1504         if (mc->nobjs < nr_pages)
1505                 return -ENOMEM;
1506
1507         mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
1508         phys = kvm_pte_to_phys(pte);
1509         prot = kvm_pgtable_stage2_pte_prot(pte);
1510
1511         childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
1512                                                     level, prot, mc, force_pte);
1513         if (IS_ERR(childp))
1514                 return PTR_ERR(childp);
1515
1516         if (!stage2_try_break_pte(ctx, mmu)) {
1517                 kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
1518                 return -EAGAIN;
1519         }
1520
1521         /*
1522          * Note, the contents of the page table are guaranteed to be made
1523          * visible before the new PTE is assigned because stage2_make_pte()
1524          * writes the PTE using smp_store_release().
1525          */
1526         new = kvm_init_table_pte(childp, mm_ops);
1527         stage2_make_pte(ctx, new);
1528         dsb(ishst);
1529         return 0;
1530 }
1531
1532 int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
1533                              struct kvm_mmu_memory_cache *mc)
1534 {
1535         struct kvm_pgtable_walker walker = {
1536                 .cb     = stage2_split_walker,
1537                 .flags  = KVM_PGTABLE_WALK_LEAF,
1538                 .arg    = mc,
1539         };
1540
1541         return kvm_pgtable_walk(pgt, addr, size, &walker);
1542 }
1543
1544 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
1545                               struct kvm_pgtable_mm_ops *mm_ops,
1546                               enum kvm_pgtable_stage2_flags flags,
1547                               kvm_pgtable_force_pte_cb_t force_pte_cb)
1548 {
1549         size_t pgd_sz;
1550         u64 vtcr = mmu->vtcr;
1551         u32 ia_bits = VTCR_EL2_IPA(vtcr);
1552         u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1553         s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1554
1555         pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1556         pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
1557         if (!pgt->pgd)
1558                 return -ENOMEM;
1559
1560         pgt->ia_bits            = ia_bits;
1561         pgt->start_level        = start_level;
1562         pgt->mm_ops             = mm_ops;
1563         pgt->mmu                = mmu;
1564         pgt->flags              = flags;
1565         pgt->force_pte_cb       = force_pte_cb;
1566
1567         /* Ensure zeroed PGD pages are visible to the hardware walker */
1568         dsb(ishst);
1569         return 0;
1570 }
1571
1572 size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
1573 {
1574         u32 ia_bits = VTCR_EL2_IPA(vtcr);
1575         u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1576         s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1577
1578         return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1579 }
1580
1581 static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
1582                               enum kvm_pgtable_walk_flags visit)
1583 {
1584         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1585
1586         if (!stage2_pte_is_counted(ctx->old))
1587                 return 0;
1588
1589         mm_ops->put_page(ctx->ptep);
1590
1591         if (kvm_pte_table(ctx->old, ctx->level))
1592                 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
1593
1594         return 0;
1595 }
1596
1597 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
1598 {
1599         size_t pgd_sz;
1600         struct kvm_pgtable_walker walker = {
1601                 .cb     = stage2_free_walker,
1602                 .flags  = KVM_PGTABLE_WALK_LEAF |
1603                           KVM_PGTABLE_WALK_TABLE_POST,
1604         };
1605
1606         WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
1607         pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1608         pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
1609         pgt->pgd = NULL;
1610 }
1611
1612 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
1613 {
1614         kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
1615         struct kvm_pgtable_walker walker = {
1616                 .cb     = stage2_free_walker,
1617                 .flags  = KVM_PGTABLE_WALK_LEAF |
1618                           KVM_PGTABLE_WALK_TABLE_POST,
1619         };
1620         struct kvm_pgtable_walk_data data = {
1621                 .walker = &walker,
1622
1623                 /*
1624                  * At this point the IPA really doesn't matter, as the page
1625                  * table being traversed has already been removed from the stage-2
1626                  * page tables. Set an appropriate range to cover the entire table.
1627                  */
1628                 .addr   = 0,
1629                 .end    = kvm_granule_size(level),
1630         };
1631
1632         WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
1633
1634         WARN_ON(mm_ops->page_count(pgtable) != 1);
1635         mm_ops->put_page(pgtable);
1636 }