/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

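/*
 * Core of the H_ENTER hypercall: insert an HPTE into the guest's hashed
 * page table, translating the guest real address through the memslots
 * and the host Linux page tables.  Can be called in real or virtual
 * mode (realmode flag); on success the index of the slot actually used
 * is returned in *pte_idx_ret.
 */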
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host
		 * page size, even when the host is using hugepages.
		 */
		if (host_pte_size < psize)
			return H_PARAMETER;

		pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
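	/*
	 * The HPT is an array of 16-byte HPTEs grouped into HPTEGs of
	 * eight entries, so pte_index << 4 below is a byte offset into
	 * the table; without H_EXACT, any free slot in the group at
	 * pte_index & ~7 may be used.
	 */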
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(*hpte);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(*hpte);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = cpu_to_be64(pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
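
/*
 * Real-mode handler for the H_ENTER hypercall; the chosen PTE index is
 * returned to the guest in GPR4.
 */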
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx %1,0,%2\n"
		     " cmpwi cr0,%1,0\n"
		     " bne 2f\n"
		     " stwcx. %3,0,%2\n"
		     " bne- 1b\n"
		     " isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

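/*
 * Invalidate the TLB entries described by rbvalues[0..npages-1], either
 * globally (tlbie, serialized by kvm->arch.tlbie_lock) or only on this
 * core (tlbiel), with an optional ptesync first to order prior HPTE
 * updates before the invalidations.
 */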
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE(%1,%0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
		asm volatile("ptesync" : : : "memory");
	}
}

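/*
 * Core of the H_REMOVE hypercall: invalidate the HPTE at pte_index if
 * it satisfies the H_AVPN/H_ANDCOND match conditions in avpn, returning
 * the old first and second dwords via hpret[].
 */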
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		u64 pte1;

		pte1 = be64_to_cpu(hpte[1]);
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte1, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, pte1);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

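/*
 * H_BULK_REMOVE: args (GPR4-GPR11) holds up to four (request, value)
 * pairs; the top byte of each request word carries the request type
 * and match flags, and the completion code is written back into the
 * same word.  Invalidations are batched into a single tlbie sequence.
 */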
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0;

	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:	/* absolute */
					found = 1;
					break;
				case 1:	/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:	/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
				be64_to_cpu(hp[1]), pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

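/*
 * H_PROTECT: update the pp, key and N protection bits of an existing
 * HPTE, updating the guest's view of the second dword first and then,
 * if the HPTE is valid and actually changing, invalidating and
 * rewriting the real entry.
 */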
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
		return H_NOT_FOUND;
	}

	v = pte;
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		pte = be64_to_cpu(hpte[1]);
		r = (pte & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
				  true);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

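/*
 * H_READ: return the first and second dwords of one HPTE (or of a
 * group of four with H_READ_4) in GPR4 onwards, substituting the
 * guest's view of the second dword from the reverse map.
 */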
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

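/* Invalidate one HPTE and flush the corresponding TLB entry globally */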
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

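/*
 * Clear the referenced (R) bit of an HPTE by rewriting only the byte
 * that contains it, then flush the TLB entry globally.
 */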
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

/*
 * When called from virtual mode, this function must be called with
 * preemption disabled; otherwise we could be preempted while holding
 * HPTE_V_HVLOCK and deadlock against another CPU spinning on the lock.
 */
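/*
 * Search the primary and, if necessary, secondary hash bucket for an
 * HPTE matching eaddr under the given SLB entry value; on success the
 * HPTE index is returned with HPTE_V_HVLOCK still held, so the caller
 * must unlock it.
 */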
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_base_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = cpu_to_be64(v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * cannot be handled here (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
	v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
	r = be64_to_cpu(hpte[1]);
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}