KVM: PPC: Book3S HV: Fix software walk of guest process page tables
arch/powerpc/kvm/book3s_64_mmu_radix.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
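
/*
 * The xlate walk below consumes these index widths from level 3 (the
 * 13-bit root level) down to level 0 (the leaf level).  Together with
 * the page offset they must cover the 52-bit space checked below:
 * 13 + 9 + 9 + 9 + 12 = 52 for 4k pages, and 13 + 9 + 9 + 5 + 16 = 52
 * for 64k pages.
 */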

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	u32 pid;
	int ret, level, ps;
	__be64 prte, rpte;
	unsigned long ptbl;
	unsigned long root, pte, index;
	unsigned long rts, bits, offset;
	unsigned long gpa;
	unsigned long proc_tbl_size;

	/* Work out effective PID */
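	/*
	 * Bits 63:62 of the effective address select the quadrant.
	 * Quadrant 0 is translated with the process's own PID and
	 * quadrant 3 with PID 0 (the kernel's quadrant); quadrants
	 * 1 and 2 are rejected here.
	 */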
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}
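	/* each process-table entry is 2 doublewords, i.e. 16 bytes */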
	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
	if (pid * 16 >= proc_tbl_size)
		return -EINVAL;

	/* Read the guest's process table to find the root of the tree
	   for the effective PID */
	ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
	ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
	if (ret)
		return ret;

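	/*
	 * The first doubleword of the process-table entry packs the radix
	 * tree size (RTS, split across two fields), the root page-directory
	 * base (RPDB) and the root page-directory size (RPDS = log2 of the
	 * number of entries).
	 */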
	root = be64_to_cpu(prte);
	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	root = root & RPDB_MASK;

	/* P9 DD1 interprets RTS (radix tree size) differently */
	offset = rts + 31;
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		offset -= 3;

	/* current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

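	/*
	 * Walk from the root (level 3) towards the leaf (level 0),
	 * consuming 'bits' index bits from the top of the remaining
	 * offset at each level, until a leaf (_PAGE_PTE) entry is found.
	 */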
	for (level = 3; level >= 0; --level) {
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* check that low bits of page table base are zero */
		if (root & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		ret = kvm_read_guest(kvm, root + index * 8,
				     &rpte, sizeof(rpte));
		if (ret)
			return ret;
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		if (pte & _PAGE_PTE)
			break;
		bits = pte & 0x1f;
		root = pte & 0x0fffffffffffff00ul;
	}
	/* need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa += eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_BASE_PSIZE	MMU_PAGE_64K
#else
#define MMU_BASE_PSIZE	MMU_PAGE_4K
#endif

static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift)
{
	int psize = MMU_BASE_PSIZE;

	if (pshift >= PMD_SHIFT)
		psize = MMU_PAGE_2M;
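	/*
	 * Form the RB operand for tlbie: the effective page number with
	 * the AP (actual page size) encoding in bits 7:5.
	 */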
	addr &= ~0xfffUL;
	addr |= mmu_psize_defs[psize].ap << 5;
	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
		     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
	asm volatile("ptesync": : :"memory");
}

unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				      unsigned long clr, unsigned long set,
				      unsigned long addr, unsigned int shift)
{
	unsigned long old = 0;

	if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
	    pte_present(*ptep)) {
		/* have to invalidate it first */
		old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
		kvmppc_radix_tlbie_page(kvm, addr, shift);
		set |= _PAGE_PRESENT;
		old &= _PAGE_PRESENT;
	}
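	/*
	 * Fold the _PAGE_PRESENT bit we cleared above back into the old
	 * PTE value, since the update below no longer sees it set.
	 */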
	return __radix_pte_update(ptep, clr, set) | old;
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
			     unsigned int level, unsigned long mmu_seq)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	unsigned long old;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
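	/*
	 * Any new levels are allocated before taking kvm->mmu_lock, since
	 * the allocations may sleep; whatever ends up unused is freed
	 * after the lock is dropped.
	 */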
	pgd = kvm->arch.pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud))
		pmd = pmd_offset(pud, gpa);
	else
		new_pmd = pmd_alloc_one(kvm->mm, gpa);

	if (level == 0 && !(pmd && pmd_present(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_large(*pmd)) {
		/* Someone else has instantiated a large page here; retry */
		ret = -EAGAIN;
		goto out_unlock;
	}
	if (level == 1 && !pmd_none(*pmd)) {
		/*
		 * There's a page table page here, but we wanted
		 * to install a large page.  Tell the caller and let
		 * it try installing a normal page if it wants.
		 */
		ret = -EBUSY;
		goto out_unlock;
	}
	if (level == 0) {
		if (pmd_none(*pmd)) {
			if (!new_ptep)
				goto out_unlock;
			pmd_populate(kvm->mm, pmd, new_ptep);
			new_ptep = NULL;
		}
		ptep = pte_offset_kernel(pmd, gpa);
		if (pte_present(*ptep)) {
			/* PTE was previously valid, so invalidate it */
			old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
						      0, gpa, 0);
			kvmppc_radix_tlbie_page(kvm, gpa, 0);
			if (old & _PAGE_DIRTY)
				mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	} else {
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
	}
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		pmd_free(kvm->mm, new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}

int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	struct page *page = NULL, *pages[1];
	long ret, npages, ok;
	unsigned int writing;
	struct vm_area_struct *vma;
	unsigned long flags;
	pte_t pte, *ptep;
	unsigned long pgflags;
	unsigned int shift, level;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address and get the page */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
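	/*
	 * The top nibble of fault_gpa carries quadrant/attribute bits
	 * from the faulting effective address, not real-address bits.
	 */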
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PGDIRFAULT))
		gpa |= ea & 0xfff;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PGDIRFAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	writing = (dsisr & DSISR_ISSTORE) != 0;
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (dsisr & DSISR_SET_RC) {
		/*
		 * Need to set an R or C bit in the 2nd-level tables;
		 * if the relevant bits aren't already set in the linux
		 * page tables, fall through to do the gup_fast to
		 * set them in the linux page tables too.
		 */
		ok = 0;
		pgflags = _PAGE_ACCESSED;
		if (writing)
			pgflags |= _PAGE_DIRTY;
		local_irq_save(flags);
		ptep = __find_linux_pte_or_hugepte(current->mm->pgd, hva,
						   NULL, NULL);
		if (ptep) {
			pte = READ_ONCE(*ptep);
			if (pte_present(pte) &&
			    (pte_val(pte) & pgflags) == pgflags)
				ok = 1;
		}
		local_irq_restore(flags);
		if (ok) {
			spin_lock(&kvm->mmu_lock);
			if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
				spin_unlock(&kvm->mmu_lock);
				return RESUME_GUEST;
			}
			ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable,
							   gpa, NULL, &shift);
			if (ptep && pte_present(*ptep)) {
				kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
							gpa, shift);
				spin_unlock(&kvm->mmu_lock);
				return RESUME_GUEST;
			}
			spin_unlock(&kvm->mmu_lock);
		}
	}

	ret = -EFAULT;
	pfn = 0;
	pte_size = PAGE_SIZE;
	pgflags = _PAGE_READ | _PAGE_EXEC;
	level = 0;
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva < vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pgflags = pgprot_val(vma->vm_page_prot);
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
			/*
			 * See if we can insert a 2MB large-page PTE here;
			 * the gpa and hva must be co-aligned within a
			 * 2MB region for that to work.
			 */
			if (pte_size >= PMD_SIZE &&
			    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
			    (hva & (PMD_SIZE - PAGE_SIZE))) {
				level = 1;
				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
			}
		}
		/* See if we can provide write access */
		if (writing) {
			/*
			 * We assume gup_fast has set dirty on the host PTE.
			 */
			pgflags |= _PAGE_WRITE;
		} else {
			local_irq_save(flags);
			ptep = __find_linux_pte_or_hugepte(current->mm->pgd,
							   hva, NULL, NULL);
			if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
				pgflags |= _PAGE_WRITE;
			local_irq_restore(flags);
		}
	}

	/*
	 * Compute the PTE value that we need to insert.
	 */
	pgflags |= _PAGE_PRESENT | _PAGE_PTE | _PAGE_ACCESSED;
	if (pgflags & _PAGE_WRITE)
		pgflags |= _PAGE_DIRTY;
	pte = pfn_pte(pfn, __pgprot(pgflags));

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
	if (ret == -EBUSY) {
		/*
		 * There's already a PMD where we wanted to install a large
		 * page; for now, fall back to installing a small page.
		 * Put back the low bits of the gfn to get the pfn of the
		 * 4k page within the 2MB region.
		 */
		level = 0;
		pfn |= gfn & ((PMD_SIZE >> PAGE_SHIFT) - 1);
		pte = pfn_pte(pfn, __pgprot(pgflags));
		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
	}
	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;

	if (page) {
		/*
		 * We drop pages[0] here, not page, because page might
		 * have been set to the head page of a compound page;
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup().
		 */
		put_page(pages[0]);
	}
	return ret;
}

static void mark_pages_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned int order)
{
	unsigned long i, limit;
	unsigned long *dp;

	if (!memslot->dirty_bitmap)
		return;
	limit = 1ul << order;
	if (limit < BITS_PER_LONG) {
		for (i = 0; i < limit; ++i)
			mark_page_dirty(kvm, gfn + i);
		return;
	}
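	/*
	 * Assuming gfn is aligned to 1 << order here (huge pages back
	 * guest real addresses that are multiples of their size), the
	 * range covers whole bitmap words on a word boundary.
	 */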
	dp = memslot->dirty_bitmap +
		(gfn - memslot->base_gfn) / BITS_PER_LONG;
	limit /= BITS_PER_LONG;
	for (i = 0; i < limit; ++i)
		*dp++ = ~0ul;
}

/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	unsigned long old;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
		if (old & _PAGE_DIRTY) {
			if (!shift)
				mark_page_dirty(kvm, gfn);
			else
				mark_pages_dirty(kvm, memslot,
						 gfn, shift - PAGE_SHIFT);
		}
	}
	return 0;
}

/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					gpa, shift);
		/* XXX need to flush tlb here? */
		ref = 1;
	}
	return ref;
}

/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
	}
	return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	unsigned long n, *p;
	int npages;

	/*
	 * Radix accumulates dirty bits in the first half of the
	 * memslot's dirty_bitmap area, for when pages are paged
	 * out or modified by the host directly.  Pick up these
	 * bits and add them to the map.
	 */
	n = kvm_dirty_bitmap_bytes(memslot) / sizeof(long);
	p = memslot->dirty_bitmap;
	for (i = 0; i < n; ++i)
		map[i] |= xchg(&p[i], 0);

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages)
			for (j = i; npages; ++j, --npages)
				__set_bit_le(j, map);
	}
	return 0;
}

static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
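	/*
	 * Each encoding carries log2(page size) in the low bits and the
	 * 3-bit AP field value in bits 31:29.
	 */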
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

void kvmppc_free_radix(struct kvm *kvm)
{
	unsigned long ig, iu, im;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;

	if (!kvm->arch.pgtable)
		return;
	pgd = kvm->arch.pgtable;
	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
			if (!pud_present(*pud))
				continue;
			pmd = pmd_offset(pud, 0);
			for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
				if (pmd_huge(*pmd)) {
					pmd_clear(pmd);
					continue;
				}
				if (!pmd_present(*pmd))
					continue;
				pte = pte_offset_map(pmd, 0);
				memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
				kvmppc_pte_free(pte);
				pmd_clear(pmd);
			}
			pmd_free(kvm->mm, pmd_offset(pud, 0));
			pud_clear(pud);
		}
		pud_free(kvm->mm, pud_offset(pgd, 0));
		pgd_clear(pgd);
	}
	pgd_free(kvm->mm, kvm->arch.pgtable);
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, PTE_TABLE_SIZE);
}

int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << PTE_INDEX_SIZE;
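	/* a radix PTE page holds 2^PTE_INDEX_SIZE eight-byte PTEs */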

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;
	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
}