power/mm: update pte_write and pte_wrprotect to handle savedwrite
[linux-2.6-block.git] arch/powerpc/kvm/book3s_hv_rm_mmu.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space, so we don't
	 * need to worry about THP collapse/split. Called only in real
	 * mode, hence no need for irq_save/restore.
	 */
	p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;
	int cpu;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
		/*
		 * On POWER9, threads are independent but the TLB is shared,
		 * so use the bit for the first thread to represent the core.
		 */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			cpu = cpu_first_thread_sibling(cpu);
		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
	}

	return global;
}
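
/*
 * The result is fed straight to do_tlbies() as its "global" argument;
 * see kvmppc_do_h_remove() and kvmppc_h_bulk_remove() below for the
 * typical pattern.
 */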

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.hpt.rev[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.hpt.rev[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
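
/*
 * Layout note: an rmap word with KVMPPC_RMAP_PRESENT set carries, in
 * its KVMPPC_RMAP_INDEX field, the HPT index of one entry on a
 * circular doubly-linked list threaded through the forw/back fields
 * of the revmap entries.  kvmppc_add_revmap_chain() above inserts at
 * the tail of that list; remove_revmap_chain() below unlinks from it.
 */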

/* Update the changed page order field of an rmap entry */
void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)
{
	unsigned long order;

	if (!psize)
		return;
	order = ilog2(psize);
	order <<= KVMPPC_RMAP_CHG_SHIFT;
	if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER))
		*rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order;
}
EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change);
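
/*
 * Example: harvesting the change (C) bit from a 16MB HPTE records
 * order ilog2(0x1000000) = 24 in the KVMPPC_RMAP_CHG_ORDER field,
 * and only if it exceeds the order already recorded there.
 */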

/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
	unlock_rmap(rmap);
}

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	/*
	 * If we had a page table change after the lookup, we would
	 * retry via mmu_notifier_retry.
	 */
	if (realmode)
		ptep = __find_linux_pte_or_hugepte(pgdir, hva, NULL,
						   &hpage_shift);
	else {
		local_irq_save(irq_flags);
		ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL,
						 &hpage_shift);
	}
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host page
		 * size, if the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !__pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}

	/* If we had a host pte mapping then check WIMG */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.hpt.rev[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}
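
/*
 * kvmppc_h_enter() above is the real-mode H_ENTER handler: the HPT slot
 * actually used is stored through pte_idx_ret, which the wrapper points
 * at vcpu->arch.gpr[4] so the guest receives it as the hcall's first
 * return value (R4, per the usual PAPR hcall return convention).
 */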

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
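
/*
 * try_lock_tlbie() is a single-shot trylock built from lwarx/stwcx.:
 * load the lock word with a reservation, give up if it is already
 * non-zero, otherwise store this cpu's LOCK_TOKEN, retrying only if
 * the reservation was lost.  The trailing isync keeps the critical
 * section from being speculated ahead of the lock acquisition.
 */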

static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
		asm volatile("ptesync" : : : "memory");
	}
}
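
/*
 * Callers can batch invalidations: kvmppc_h_bulk_remove() below passes
 * up to four RB values at once, so a single ptesync and (for the global
 * case) one eieio; tlbsync; ptesync sequence covers the whole batch.
 */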

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed. This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}
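
/*
 * H_BULK_REMOVE processes up to four requests, each held in a pair of
 * GPRs starting at vcpu->arch.gpr[4].  The first word of a pair packs
 * control bits into its top byte (bits 62-63: request type, where 3
 * means "end of list"; bits 56-57: match flags - absolute, andcond or
 * AVPN) above the PTE index in the low 56 bits; the second word holds
 * the AVPN or andcond value.  On return the top byte is rewritten with
 * a status - (0x80 | flags) success, (0x90 | flags) not found,
 * (0xa0 | flags) parameter error - and, on success, the harvested R/C
 * bits (rcbits << (56 - 5)).
 */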

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				if (is_mmio_hpte(hp0, hp1))
					atomic64_inc(&kvm->arch.mmio_update);
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
					    be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte_v, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = pte_v = be64_to_cpu(hpte[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte_v);
		return H_NOT_FOUND;
	}

	pte_r = be64_to_cpu(hpte[1]);
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable. If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		r = (pte_r & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte_r) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
				  true);
			/* Don't lose R/C bit updates done by hardware */
			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
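
/*
 * With H_READ_4 the index is rounded down to a multiple of 4 and four
 * (v, r) pairs are returned in gpr[4]..gpr[11].  Shadowed entries
 * (HPTE_V_ABSENT) are reported to the guest as valid, with the guest's
 * view of the second doubleword taken from the revmap entry.
 */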

long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			unsigned long psize = hpte_page_size(v, r);
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			rmap = revmap_for_hpte(kvm, v, gr);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_CHANGED;
				kvmppc_update_rmap_change(rmap, psize);
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	u64 hp0, hp1;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;
	u64 hp0, hp1;

	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};
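
/*
 * Indexed by the 2-bit SLB_VSID_LP field ((slb_v & SLB_VSID_LP) >> 4),
 * valid only when SLB_VSID_L is set; e.g. LP = 0 selects 16M pages
 * (shift 24) and LP = 1 selects 64k pages (shift 16).
 */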

static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
		unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
	struct mmio_hpte_cache_entry *entry = NULL;
	unsigned int pshift;
	unsigned int i;

	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
		entry = &vcpu->arch.mmio_cache.entry[i];
		if (entry->mmio_update == mmio_update) {
			pshift = entry->slb_base_pshift;
			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
			    entry->slb_v == slb_v)
				return entry;
		}
	}
	return NULL;
}

static struct mmio_hpte_cache_entry *
			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.mmio_cache.index;

	vcpu->arch.mmio_cache.index++;
	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
		vcpu->arch.mmio_cache.index = 0;

	return &vcpu->arch.mmio_cache.entry[index];
}
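
/*
 * The per-vcpu MMIO cache lets repeated faults on an emulated MMIO
 * page skip the HPT search in kvmppc_hpte_hv_fault().  Entries are
 * only trusted while their mmio_update stamp matches
 * kvm->arch.mmio_update, which is bumped whenever an MMIO HPTE is
 * removed or modified, invalidating the whole cache in one step.
 */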

/*
 * When called from virtmode, this function should be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK across a
 * preemption could trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r, orig_v;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				v = hpte_new_to_old_v(v, r);
				r = hpte_new_to_old_r(r);
			}

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_base_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], orig_v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
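
/*
 * On success the return value is the global pte_index, encoded as
 * (hash << 3) plus the slot number within the PTEG, and the HPTE is
 * returned still locked (HPTE_V_HVLOCK set) for the caller to release.
 */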

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * not one of those cases (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}
		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;	/* send fault up to host kernel mode */
}