/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/pte-walk.h>

#include "trace_hv.h"

//#define DEBUG_RESIZE_HPT	1

#ifdef DEBUG_RESIZE_HPT
#define resize_hpt_debug(resize, ...)				\
	do {							\
		printk(KERN_DEBUG "RESIZE HPT %p: ", resize);	\
		printk(__VA_ARGS__);				\
	} while (0)
#else
#define resize_hpt_debug(resize, ...)				\
	do { } while (0)
#endif

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);

struct kvm_resize_hpt {
	/* These fields read-only after init */
	struct kvm *kvm;
	struct work_struct work;
	u32 order;

	/* These fields protected by kvm->lock */

	/* Possible values and their usage:
	 *  <0     an error occurred during allocation,
	 *  -EBUSY allocation is in progress,
	 *  0      allocation made successfully.
	 */
	int error;

	/* Private to the work thread, until error != -EBUSY,
	 * then protected by kvm->lock.
	 */
	struct kvm_hpt_info hpt;
};

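/*
 * Allocate a hashed page table (HPT) of 2**order bytes, trying the
 * CMA reserve first and falling back to the normal page allocator.
 * Each HPTE is 16 bytes, so the table holds 2**(order - 4) entries,
 * and a reverse-map entry is allocated alongside for each HPTE.
 */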
int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
{
	unsigned long hpt = 0;
	int cma = 0;
	struct page *page = NULL;
	struct revmap_entry *rev;
	unsigned long npte;

	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
		return -EINVAL;

	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		memset((void *)hpt, 0, (1ul << order));
		cma = 1;
	}

	if (!hpt)
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL
				       |__GFP_NOWARN, order - PAGE_SHIFT);

	if (!hpt)
		return -ENOMEM;

	/* HPTEs are 2**4 bytes long */
	npte = 1ul << (order - 4);

	/* Allocate reverse map array */
	rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
	if (!rev) {
		if (cma)
			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
		else
			free_pages(hpt, order - PAGE_SHIFT);
		return -ENOMEM;
	}

	info->order = order;
	info->virt = hpt;
	info->cma = cma;
	info->rev = rev;

	return 0;
}

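/*
 * Install an allocated HPT as the active hash table for this guest.
 * The low bits of SDR1 hold the HTABSIZE field, encoded here as
 * info->order - 18 since the minimum architected HPT size is 2**18
 * bytes.
 */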
void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
{
	atomic64_set(&kvm->arch.mmio_update, 0);
	kvm->arch.hpt = *info;
	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);

	pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
		 info->virt, (long)info->order, kvm->arch.lpid);
}

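/*
 * Allocate or reset the HPT for a guest reset.  If an HPT of the
 * requested order already exists it is simply zeroed and its rmap
 * chains reset; otherwise any old table is freed and a new one
 * allocated.  Returns -EBUSY if vcpus are running while the MMU is
 * still marked ready, since the MMU must be quiescent for this.
 */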
long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
{
	long err = -EBUSY;
	struct kvm_hpt_info info;

	mutex_lock(&kvm->lock);
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
			goto out;
		}
	}
	if (kvm_is_radix(kvm)) {
		err = kvmppc_switch_mmu_to_hpt(kvm);
		if (err)
			goto out;
	}

	if (kvm->arch.hpt.order == order) {
		/* We already have a suitable HPT */

		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		err = 0;
		goto out;
	}

	if (kvm->arch.hpt.virt) {
		kvmppc_free_hpt(&kvm->arch.hpt);
		kvmppc_rmap_reset(kvm);
	}

	err = kvmppc_allocate_hpt(&info, order);
	if (err < 0)
		goto out;
	kvmppc_set_hpt(kvm, &info);

 out:
	if (err == 0)
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);

	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm_hpt_info *info)
{
	vfree(info->rev);
	info->rev = NULL;
	if (info->cma)
		kvm_free_hpt_cma(virt_to_page(info->virt),
				 1 << (info->order - PAGE_SHIFT));
	else if (info->virt)
		free_pages(info->virt, info->order - PAGE_SHIFT);
	info->virt = 0;
	info->order = 0;
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

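/*
 * Pre-insert bolted HPTEs covering the virtual real mode area (VRMA),
 * which the guest uses for memory accesses made before it enables
 * address translation.  One HPTE is created per large page, each
 * placed in slot 7 of its HPTEG on the assumption that the hash
 * table is otherwise empty at this point.
 */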
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
			& kvmppc_hpt_mask(&kvm->arch.hpt);
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

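/*
 * One-time MMU setup: carve out the LPID space, claiming the host's
 * own LPID and the reserved LPID that is used during partition
 * switching so that guests are never allocated either one.
 */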
int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
	host_lpid = 0;
	if (cpu_has_feature(CPU_FTR_HVMODE))
		host_lpid = mfspr(SPRN_LPID);
	rsvd_lpid = LPID_RSVD;

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		msr |= MSR_TS_S;
	else
		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
	kvmppc_set_msr(vcpu, msr);
}

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	long ret;

	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = kvmppc_actual_pgsz(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

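/*
 * Translate a guest effective address to a guest real address by
 * looking it up in the guest's hash table, applying the PP-bit and
 * storage-key permission checks along the way.  This is the xlate
 * callback used when KVM needs to access guest memory by effective
 * address, e.g. for instruction emulation.
 */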
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, orig_v, gr;
	__be64 *hptep;
	long int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	if (kvm_is_radix(vcpu->kvm))
		return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	preempt_disable();
	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
	gr = kvm->arch.hpt.rev[index].guest_rpte;

	unlock_hpte(hptep, orig_v);
	preempt_enable();

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned long gpa, gva_t ea, int is_store)
{
	u32 last_inst;

	/*
	 * Fast path - check if the guest physical address corresponds to a
	 * device on the FAST_MMIO_BUS, if so we can avoid loading the
	 * instruction altogether, then we can just handle it and return.
	 */
	if (is_store) {
		int idx, ret;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
				       NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			return RESUME_GUEST;
		}
	}

	/*
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
	    EMULATE_DONE)
		return RESUME_GUEST;

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

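/*
 * Handle a hashed-MMU guest page fault that the real-mode code could
 * not resolve: re-check the HPTE found in real mode, fault the
 * backing page in through the Linux MM (or treat it as emulated
 * MMIO if there is no memslot), and rewrite the HPTE to point at the
 * real page, letting the guest retry if anything changed under us.
 */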
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long hpte[3], r;
	unsigned long hnow_v, hnow_r;
	__be64 *hptep;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa_base, gfn_base;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	bool is_ci;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;
	long mmio_update;

	if (kvm_is_radix(kvm))
		return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;

	if (vcpu->arch.pgfault_cache) {
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
			r = vcpu->arch.pgfault_cache->rpte;
			psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0],
						   r);
			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
			gfn_base = gpa_base >> PAGE_SHIFT;
			gpa = gpa_base | (ea & (psize - 1));
			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
						dsisr & DSISR_ISSTORE);
		}
	}
	index = vcpu->arch.pgfault_index;
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	rev = &kvm->arch.hpt.rev[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	hpte[1] = be64_to_cpu(hptep[1]);
	hpte[2] = r = rev->guest_rpte;
	unlock_hpte(hptep, hpte[0]);
	preempt_enable();

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
		hpte[1] = hpte_new_to_old_r(hpte[1]);
	}
	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = kvmppc_actual_pgsz(hpte[0], r);
	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
	gfn_base = gpa_base >> PAGE_SHIFT;
	gpa = gpa_base | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	/*
	 * This should never happen, because of the slot_is_aligned()
	 * check in kvmppc_do_h_enter().
	 */
	if (gfn_base < memslot->base_gfn)
		return -EFAULT;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	ret = -EFAULT;
	is_ci = false;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			goto out_put;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;
			unsigned long flags;
			/*
			 * We need to protect against page table destruction,
			 * and hugepage split and collapse.
			 */
			local_irq_save(flags);
			ptep = find_current_mm_pte(current->mm->pgd,
						   hva, NULL, NULL);
			if (ptep) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (__pte_write(pte))
					write_ok = 1;
			}
			local_irq_restore(flags);
		}
	}

	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_ci)) {
		if (is_ci)
			goto out_put;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/*
	 * Set the HPTE to point to pfn.
	 * Since the pfn is at PAGE_SIZE granularity, make sure we
	 * don't mask out lower-order bits if psize < PAGE_SIZE.
	 */
	if (psize < PAGE_SIZE)
		psize = PAGE_SIZE;
	r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
					((pfn << PAGE_SHIFT) & ~(psize - 1));
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hnow_v = be64_to_cpu(hptep[0]);
	hnow_r = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
		hnow_r = hpte_new_to_old_r(hnow_r);
	}

	/*
	 * If the HPT is being resized, don't update the HPTE,
	 * instead let the guest retry after the resize operation is complete.
	 * The synchronization for mmu_ready test vs. set is provided
	 * by the HPTE lock.
	 */
	if (!kvm->arch.mmu_ready)
		goto out_unlock;

	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	/* Always put the HPTE in the rmap chain for the page base address */
	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		r = hpte_old_to_new_r(hpte[0], r);
		hpte[0] = hpte_old_to_new_v(hpte[0]);
	}
	hptep[1] = cpu_to_be64(r);
	eieio();
	__unlock_hpte(hptep, hpte[0]);
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	trace_kvm_page_fault_exit(vcpu, hpte, ret);

	if (page) {
		/*
		 * We drop pages[0] here, not page, because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	preempt_enable();
	goto out_put;
}

void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		/* Mutual exclusion with kvm_unmap_hva_range etc. */
		spin_lock(&kvm->mmu_lock);
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
		spin_unlock(&kvm->mmu_lock);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

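/*
 * MMU notifier helpers: walk every memslot page whose host virtual
 * address falls within [start, end) and apply a handler to its gfn.
 */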
typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn);

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				hva_handler_fn handler)
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			ret = handler(kvm, memslot, gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  hva_handler_fn handler)
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
			      struct kvm_memory_slot *memslot,
			      unsigned long *rmapp, unsigned long gfn)
{
	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long j, h;
	unsigned long ptel, psize, rcbits;

	j = rev[i].forw;
	if (j == i) {
		/* chain is now empty */
		*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
	} else {
		/* remove i from chain */
		h = rev[i].back;
		rev[h].forw = j;
		rev[j].back = h;
		rev[i].forw = rev[i].back = i;
		*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
	}

	/* Now check and modify the HPTE */
	ptel = rev[i].guest_rpte;
	psize = kvmppc_actual_pgsz(be64_to_cpu(hptep[0]), ptel);
	if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
	    hpte_rpn(ptel, psize) == gfn) {
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
		/* Harvest R and C */
		rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
		*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
		if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap)
			kvmppc_update_dirty_map(memslot, gfn, psize);
		if (rcbits & ~rev[i].guest_rpte) {
			rev[i].guest_rpte = ptel | rcbits;
			note_hpte_modification(kvm, &rev[i]);
		}
	}
}

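/*
 * Invalidate every HPTE that maps the given gfn by draining its
 * reverse-map chain, taking care to respect the rmap-before-HPTE
 * lock ordering described below.
 */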
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			   unsigned long gfn)
{
	unsigned long i;
	__be64 *hptep;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}

		kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
		unlock_rmap(rmapp);
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	}
	return 0;
}

int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva_range(kvm, start, end, handler);
	return 0;
}

void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
				  struct kvm_memory_slot *memslot)
{
	unsigned long gfn;
	unsigned long n;
	unsigned long *rmapp;

	gfn = memslot->base_gfn;
	rmapp = memslot->arch.rmap;
	if (kvm_is_radix(kvm)) {
		kvmppc_radix_flush_memslot(kvm, memslot);
		return;
	}

	for (n = memslot->npages; n; --n, ++gfn) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, memslot, gfn);
		++rmapp;
	}
}

static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	__be64 *hptep;
	int ret = 0;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
	return kvm_handle_hva_range(kvm, start, end, handler);
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
			j = rev[i].forw;
			if (be64_to_cpu(hp[1]) & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
	return kvm_handle_hva(kvm, hva, handler);
}

void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva(kvm, hva, handler);
}

static int vcpus_running(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.vcpus_running) != 0;
}

/*
 * Returns the number of system pages that are dirty.
 * This can be more than 1 if we find a huge-page HPTE.
 */
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long n;
	unsigned long v, r;
	__be64 *hptep;
	int npages_dirty = 0;

 retry:
	lock_rmap(rmapp);
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return npages_dirty;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		unsigned long hptep1;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/*
		 * Checking the C (changed) bit here is racy since there
		 * is no guarantee about when the hardware writes it back.
		 * If the HPTE is not writable then it is stable since the
		 * page can't be written to, and we would have done a tlbie
		 * (which forces the hardware to complete any writeback)
		 * when making the HPTE read-only.
		 * If vcpus are running then this call is racy anyway
		 * since the page could get dirtied subsequently, so we
		 * expect there to be a further call which would pick up
		 * any delayed C bit writeback.
		 * Otherwise we need to do the tlbie even if C==0 in
		 * order to pick up any delayed writeback of C.
		 */
		hptep1 = be64_to_cpu(hptep[1]);
		if (!(hptep1 & HPTE_R_C) &&
		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
			continue;
		}

		/* need to make it temporarily absent so C is stable */
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		v = be64_to_cpu(hptep[0]);
		r = be64_to_cpu(hptep[1]);
		if (r & HPTE_R_C) {
			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			n = kvmppc_actual_pgsz(v, r);
			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
			if (n > npages_dirty)
				npages_dirty = n;
			eieio();
		}
		v &= ~HPTE_V_ABSENT;
		v |= HPTE_V_VALID;
		__unlock_hpte(hptep, v);
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return npages_dirty;
}

void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}

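/*
 * Build the dirty bitmap for a memslot by scanning each page's rmap
 * chain and harvesting the hardware C (changed) bits from its HPTEs.
 */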
long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i;
	unsigned long *rmapp;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since we always put huge-page HPTEs in the rmap chain
		 * corresponding to their page base address.
		 */
		if (npages)
			set_dirty_bits(map, i, npages);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
	if (npages < 1)
		goto err;
	page = pages[0];
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	int srcu_idx;

	put_page(page);

	if (!dirty)
		return;

	/* We need to mark this page dirty in the memslot dirty_bitmap, if any */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap)
		set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/*
 * HPT resizing
 */
static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
{
	int rc;

	rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
	if (rc < 0)
		return rc;

	resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
			 resize->hpt.virt);

	return 0;
}

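/*
 * Move one HPTE from the old hash table into the pending resized
 * table: unmap it, recompute its hash from the AVPN and the old PTEG
 * index, and re-insert it at the corresponding slot of the new PTEG.
 * Only bolted entries are preserved across the resize; everything
 * else is dropped here and faulted back in after the pivot.
 */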
static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
					    unsigned long idx)
{
	struct kvm *kvm = resize->kvm;
	struct kvm_hpt_info *old = &kvm->arch.hpt;
	struct kvm_hpt_info *new = &resize->hpt;
	unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1;
	unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1;
	__be64 *hptep, *new_hptep;
	unsigned long vpte, rpte, guest_rpte;
	int ret;
	struct revmap_entry *rev;
	unsigned long apsize, avpn, pteg, hash;
	unsigned long new_idx, new_pteg, replace_vpte;
	int pshift;

	hptep = (__be64 *)(old->virt + (idx << 4));

	/* Guest is stopped, so new HPTEs can't be added or faulted
	 * in, only unmapped or altered by host actions.  So, it's
	 * safe to check this before we take the HPTE lock */
	vpte = be64_to_cpu(hptep[0]);
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		return 0;	/* nothing to do */

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();

	vpte = be64_to_cpu(hptep[0]);

	ret = 0;
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		/* Nothing to do */
		goto out;

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		rpte = be64_to_cpu(hptep[1]);
		vpte = hpte_new_to_old_v(vpte, rpte);
	}

	/* Unmap */
	rev = &old->rev[idx];
	guest_rpte = rev->guest_rpte;

	ret = -EIO;
	apsize = kvmppc_actual_pgsz(vpte, guest_rpte);
	if (!apsize)
		goto out;

	if (vpte & HPTE_V_VALID) {
		unsigned long gfn = hpte_rpn(guest_rpte, apsize);
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		struct kvm_memory_slot *memslot =
			__gfn_to_memslot(kvm_memslots(kvm), gfn);

		if (memslot) {
			unsigned long *rmapp;
			rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];

			lock_rmap(rmapp);
			kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
			unlock_rmap(rmapp);
		}

		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}

	/* Reload PTE after unmap */
	vpte = be64_to_cpu(hptep[0]);
	BUG_ON(vpte & HPTE_V_VALID);
	BUG_ON(!(vpte & HPTE_V_ABSENT));

	ret = 0;
	if (!(vpte & HPTE_V_BOLTED))
		goto out;

	rpte = be64_to_cpu(hptep[1]);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		vpte = hpte_new_to_old_v(vpte, rpte);
		rpte = hpte_new_to_old_r(rpte);
	}

	pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
	avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
	pteg = idx / HPTES_PER_GROUP;
	if (vpte & HPTE_V_SECONDARY)
		pteg = ~pteg;

	if (!(vpte & HPTE_V_1TB_SEG)) {
		unsigned long offset, vsid;

		/* We only have 28 - 23 bits of offset in avpn */
		offset = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (pshift < 23)
			offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;

		hash = vsid ^ (offset >> pshift);
	} else {
		unsigned long offset, vsid;

		/* We only have 40 - 23 bits of seg_off in avpn */
		offset = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (pshift < 23)
			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;

		hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
	}

	new_pteg = hash & new_hash_mask;
	if (vpte & HPTE_V_SECONDARY)
		new_pteg = ~hash & new_hash_mask;

	new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
	new_hptep = (__be64 *)(new->virt + (new_idx << 4));

	replace_vpte = be64_to_cpu(new_hptep[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
		replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
	}

	if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		BUG_ON(new->order >= old->order);

		if (replace_vpte & HPTE_V_BOLTED) {
			if (vpte & HPTE_V_BOLTED)
				/* Bolted collision, nothing we can do */
				ret = -ENOSPC;
			/* Discard the new HPTE */
			goto out;
		}

		/* Discard the previous HPTE */
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		rpte = hpte_old_to_new_r(vpte, rpte);
		vpte = hpte_old_to_new_v(vpte);
	}

	new_hptep[1] = cpu_to_be64(rpte);
	new->rev[new_idx].guest_rpte = guest_rpte;
	/* No need for a barrier, since new HPT isn't active */
	new_hptep[0] = cpu_to_be64(vpte);
	unlock_hpte(new_hptep, vpte);

 out:
	unlock_hpte(hptep, vpte);
	return ret;
}

5e985969 DG |
1406 | static int resize_hpt_rehash(struct kvm_resize_hpt *resize) |
1407 | { | |
b5baa687 DG |
1408 | struct kvm *kvm = resize->kvm; |
1409 | unsigned long i; | |
1410 | int rc; | |
1411 | ||
1412 | for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { | |
1413 | rc = resize_hpt_rehash_hpte(resize, i); | |
1414 | if (rc != 0) | |
1415 | return rc; | |
1416 | } | |
1417 | ||
1418 | return 0; | |
5e985969 DG |
1419 | } |
1420 | ||
1421 | static void resize_hpt_pivot(struct kvm_resize_hpt *resize) | |
1422 | { | |
b5baa687 DG |
1423 | struct kvm *kvm = resize->kvm; |
1424 | struct kvm_hpt_info hpt_tmp; | |
1425 | ||
1426 | /* Exchange the pending tables in the resize structure with | |
1427 | * the active tables */ | |
1428 | ||
1429 | resize_hpt_debug(resize, "resize_hpt_pivot()\n"); | |
1430 | ||
1431 | spin_lock(&kvm->mmu_lock); | |
1432 | asm volatile("ptesync" : : : "memory"); | |
1433 | ||
1434 | hpt_tmp = kvm->arch.hpt; | |
1435 | kvmppc_set_hpt(kvm, &resize->hpt); | |
1436 | resize->hpt = hpt_tmp; | |
1437 | ||
1438 | spin_unlock(&kvm->mmu_lock); | |
1439 | ||
1440 | synchronize_srcu_expedited(&kvm->srcu); | |
1441 | ||
790a9df5 DG |
1442 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
1443 | kvmppc_setup_partition_table(kvm); | |
1444 | ||
b5baa687 | 1445 | resize_hpt_debug(resize, "resize_hpt_pivot() done\n"); |
5e985969 DG |
1446 | } |
1447 | ||
1448 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) | |
1449 | { | |
4ed11aee SP |
1450 | if (WARN_ON(!mutex_is_locked(&kvm->lock))) |
1451 | return; | |
b5baa687 | 1452 | |
5b73d634 DG |
1453 | if (!resize) |
1454 | return; | |
1455 | ||
4ed11aee SP |
1456 | if (resize->error != -EBUSY) { |
1457 | if (resize->hpt.virt) | |
1458 | kvmppc_free_hpt(&resize->hpt); | |
1459 | kfree(resize); | |
1460 | } | |
b5baa687 | 1461 | |
4ed11aee SP |
1462 | if (kvm->arch.resize_hpt == resize) |
1463 | kvm->arch.resize_hpt = NULL; | |
5e985969 DG |
1464 | } |
1465 | ||
1466 | static void resize_hpt_prepare_work(struct work_struct *work) | |
1467 | { | |
1468 | struct kvm_resize_hpt *resize = container_of(work, | |
1469 | struct kvm_resize_hpt, | |
1470 | work); | |
1471 | struct kvm *kvm = resize->kvm; | |
4ed11aee | 1472 | int err = 0; |
5e985969 | 1473 | |
3073774e SP |
1474 | if (WARN_ON(resize->error != -EBUSY)) |
1475 | return; | |
1476 | ||
4ed11aee | 1477 | mutex_lock(&kvm->lock); |
5e985969 | 1478 | |
4ed11aee SP |
1479 | /* Is the request still current? */ | |
1480 | if (kvm->arch.resize_hpt == resize) { | |
1481 | /* The allocation below may be large and slow: | |
1482 | * don't hold kvm->lock while sleeping on it. | |
1483 | */ | |
1484 | mutex_unlock(&kvm->lock); | |
5e985969 | 1485 | |
4ed11aee SP |
1486 | resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", |
1487 | resize->order); | |
3073774e | 1488 | |
4ed11aee SP |
1489 | err = resize_hpt_allocate(resize); |
1490 | ||
1491 | /* -EBUSY is reserved to mean "allocation still in | |
1492 | * progress", so the allocator must not return it here. | |
1493 | */ | |
1494 | if (WARN_ON(err == -EBUSY)) | |
1495 | err = -EINPROGRESS; | |
1496 | ||
1497 | mutex_lock(&kvm->lock); | |
1498 | /* It is possible that kvm->arch.resize_hpt != resize | |
1499 | * after we grab kvm->lock again. | |
1500 | */ | |
1501 | } | |
5e985969 DG |
1502 | |
1503 | resize->error = err; | |
5e985969 | 1504 | |
4ed11aee SP |
1505 | if (kvm->arch.resize_hpt != resize) |
1506 | resize_hpt_release(kvm, resize); | |
1507 | ||
5e985969 DG |
1508 | mutex_unlock(&kvm->lock); |
1509 | } | |
1510 | ||
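/*
 * The locking shape used by resize_hpt_prepare_work() above, in
 * isolation (a sketch with hypothetical names, not kernel code): take
 * the lock to check that the request is still current, drop it around
 * the slow allocation, then retake it and let the caller re-check,
 * since the current request may have changed in between.
 */
#include <pthread.h>

struct slow_ctx {
	pthread_mutex_t lock;
	void *current_req;		/* protected by lock */
};

static void slow_worker(struct slow_ctx *c, void *req)
{
	pthread_mutex_lock(&c->lock);
	if (c->current_req == req) {
		/* don't hold the lock across the slow part */
		pthread_mutex_unlock(&c->lock);

		/* ... long-running allocation would happen here ... */

		pthread_mutex_lock(&c->lock);
		/* c->current_req may no longer equal req: re-check it */
	}
	pthread_mutex_unlock(&c->lock);
}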
1511 | long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, | |
1512 | struct kvm_ppc_resize_hpt *rhpt) | |
1513 | { | |
1514 | unsigned long flags = rhpt->flags; | |
1515 | unsigned long shift = rhpt->shift; | |
1516 | struct kvm_resize_hpt *resize; | |
1517 | int ret; | |
1518 | ||
891f1ebf | 1519 | if (flags != 0 || kvm_is_radix(kvm)) |
5e985969 DG |
1520 | return -EINVAL; |
1521 | ||
1522 | if (shift && ((shift < 18) || (shift > 46))) | |
1523 | return -EINVAL; | |
1524 | ||
1525 | mutex_lock(&kvm->lock); | |
1526 | ||
1527 | resize = kvm->arch.resize_hpt; | |
1528 | ||
1529 | if (resize) { | |
1530 | if (resize->order == shift) { | |
3073774e SP |
1531 | /* Suitable resize in progress? */ |
1532 | ret = resize->error; | |
1533 | if (ret == -EBUSY) | |
5e985969 | 1534 | ret = 100; /* estimated time in ms */ |
3073774e SP |
1535 | else if (ret) |
1536 | resize_hpt_release(kvm, resize); | |
5e985969 DG |
1537 | |
1538 | goto out; | |
1539 | } | |
1540 | ||
1541 | /* not suitable, cancel it */ | |
1542 | resize_hpt_release(kvm, resize); | |
1543 | } | |
1544 | ||
1545 | ret = 0; | |
1546 | if (!shift) | |
1547 | goto out; /* nothing to do */ | |
1548 | ||
1549 | /* start new resize */ | |
1550 | ||
1551 | resize = kzalloc(sizeof(*resize), GFP_KERNEL); | |
abd80dcb DC |
1552 | if (!resize) { |
1553 | ret = -ENOMEM; | |
1554 | goto out; | |
1555 | } | |
3073774e SP |
1556 | |
1557 | resize->error = -EBUSY; | |
5e985969 DG |
1558 | resize->order = shift; |
1559 | resize->kvm = kvm; | |
1560 | INIT_WORK(&resize->work, resize_hpt_prepare_work); | |
1561 | kvm->arch.resize_hpt = resize; | |
1562 | ||
1563 | schedule_work(&resize->work); | |
1564 | ||
1565 | ret = 100; /* estimated time in ms */ | |
1566 | ||
1567 | out: | |
1568 | mutex_unlock(&kvm->lock); | |
1569 | return ret; | |
1570 | } | |
1571 | ||
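/*
 * Userspace view of the ioctl above (a sketch, not part of this file;
 * assumes vm_fd is an open KVM VM descriptor): poll
 * KVM_PPC_RESIZE_HPT_PREPARE while it returns a positive "retry in
 * roughly N ms" estimate; 0 means the new HPT is allocated and ready
 * to commit.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int resize_hpt_prepare_poll(int vm_fd, unsigned int shift)
{
	struct kvm_ppc_resize_hpt rhpt = { .flags = 0, .shift = shift };
	int ret;

	do {
		ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
		if (ret > 0)			/* estimated time in ms */
			usleep(ret * 1000);
	} while (ret > 0);

	return ret;	/* 0: prepared; -1 with errno set: failure */
}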
1572 | static void resize_hpt_boot_vcpu(void *opaque) | |
1573 | { | |
1574 | /* Nothing to do, just force a KVM exit */ | |
1575 | } | |
1576 | ||
1577 | long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, | |
1578 | struct kvm_ppc_resize_hpt *rhpt) | |
1579 | { | |
1580 | unsigned long flags = rhpt->flags; | |
1581 | unsigned long shift = rhpt->shift; | |
1582 | struct kvm_resize_hpt *resize; | |
1583 | long ret; | |
1584 | ||
891f1ebf | 1585 | if (flags != 0 || kvm_is_radix(kvm)) |
5e985969 DG |
1586 | return -EINVAL; |
1587 | ||
1588 | if (shift && ((shift < 18) || (shift > 46))) | |
1589 | return -EINVAL; | |
1590 | ||
1591 | mutex_lock(&kvm->lock); | |
1592 | ||
1593 | resize = kvm->arch.resize_hpt; | |
1594 | ||
1595 | /* This shouldn't be possible */ | |
1596 | ret = -EIO; | |
1b151ce4 | 1597 | if (WARN_ON(!kvm->arch.mmu_ready)) |
5e985969 DG |
1598 | goto out_no_hpt; |
1599 | ||
1600 | /* Stop VCPUs from running while we mess with the HPT */ | |
1b151ce4 | 1601 | kvm->arch.mmu_ready = 0; |
5e985969 DG |
1602 | smp_mb(); |
1603 | ||
1604 | /* Boot all CPUs out of the guest so they re-read | |
1b151ce4 | 1605 | * mmu_ready */ |
5e985969 DG |
1606 | on_each_cpu(resize_hpt_boot_vcpu, NULL, 1); |
1607 | ||
1608 | ret = -ENXIO; | |
1609 | if (!resize || (resize->order != shift)) | |
1610 | goto out; | |
1611 | ||
5e985969 | 1612 | ret = resize->error; |
3073774e | 1613 | if (ret) |
5e985969 DG |
1614 | goto out; |
1615 | ||
1616 | ret = resize_hpt_rehash(resize); | |
3073774e | 1617 | if (ret) |
5e985969 DG |
1618 | goto out; |
1619 | ||
1620 | resize_hpt_pivot(resize); | |
1621 | ||
1622 | out: | |
1623 | /* Let VCPUs run again */ | |
1b151ce4 | 1624 | kvm->arch.mmu_ready = 1; |
5e985969 DG |
1625 | smp_mb(); |
1626 | out_no_hpt: | |
1627 | resize_hpt_release(kvm, resize); | |
1628 | mutex_unlock(&kvm->lock); | |
1629 | return ret; | |
1630 | } | |
1631 | ||
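/*
 * Completing the userspace flow sketched after
 * kvm_vm_ioctl_resize_hpt_prepare() (again an illustration, not part
 * of this file): once PREPARE has returned 0, one COMMIT with the
 * same shift triggers the rehash and pivot above.
 */
static int resize_hpt_commit(int vm_fd, unsigned int shift)
{
	struct kvm_ppc_resize_hpt rhpt = { .flags = 0, .shift = shift };

	/* 0 on success; fails with ENXIO if no matching PREPARE is pending */
	return ioctl(vm_fd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
}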
a2932923 PM |
1632 | /* |
1633 | * Functions for reading and writing the hash table via reads and | |
1634 | * writes on a file descriptor. | |
1635 | * | |
1636 | * Reads return the guest view of the hash table, which has to be | |
1637 | * pieced together from the real hash table and the guest_rpte | |
1638 | * values in the revmap array. | |
1639 | * | |
1640 | * On writes, each HPTE written is considered in turn, and if it | |
1641 | * is valid, it is written to the HPT as if an H_ENTER with the | |
1642 | * exact flag set was done. When the invalid count in a | |
1643 | * header written to the stream is non-zero, the kernel | |
1644 | * makes sure that many HPTEs are invalid, invalidating | |
1645 | * them itself if necessary. | |
1646 | */ | |
1647 | ||
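/*
 * The stream format described above, for reference: each record is a
 * kvm_get_htab_header (from the powerpc uapi headers) followed by
 * hdr.n_valid HPTEs of HPTE_SIZE bytes each; the hdr.n_invalid
 * entries that follow them carry no data on the stream.
 *
 *	struct kvm_get_htab_header {
 *		__u32	index;
 *		__u16	n_valid;
 *		__u16	n_invalid;
 *	};
 */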
1648 | struct kvm_htab_ctx { | |
1649 | unsigned long index; | |
1650 | unsigned long flags; | |
1651 | struct kvm *kvm; | |
1652 | int first_pass; | |
1653 | }; | |
1654 | ||
1655 | #define HPTE_SIZE (2 * sizeof(unsigned long)) | |
1656 | ||
a1b4a0f6 PM |
1657 | /* |
1658 | * Returns 1 if this HPT entry has been modified or has pending | |
1659 | * R/C bit changes. | |
1660 | */ | |
6f22bd32 | 1661 | static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp) |
a1b4a0f6 PM |
1662 | { |
1663 | unsigned long rcbits_unset; | |
1664 | ||
1665 | if (revp->guest_rpte & HPTE_GR_MODIFIED) | |
1666 | return 1; | |
1667 | ||
1668 | /* Also need to consider changes in reference and changed bits */ | |
1669 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); | |
6f22bd32 AG |
1670 | if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) && |
1671 | (be64_to_cpu(hptp[1]) & rcbits_unset)) | |
a1b4a0f6 PM |
1672 | return 1; |
1673 | ||
1674 | return 0; | |
1675 | } | |
1676 | ||
6f22bd32 | 1677 | static long record_hpte(unsigned long flags, __be64 *hptp, |
a2932923 PM |
1678 | unsigned long *hpte, struct revmap_entry *revp, |
1679 | int want_valid, int first_pass) | |
1680 | { | |
abb7c7dd | 1681 | unsigned long v, r, hr; |
a1b4a0f6 | 1682 | unsigned long rcbits_unset; |
a2932923 PM |
1683 | int ok = 1; |
1684 | int valid, dirty; | |
1685 | ||
1686 | /* Unmodified entries are uninteresting except on the first pass */ | |
a1b4a0f6 | 1687 | dirty = hpte_dirty(revp, hptp); |
a2932923 PM |
1688 | if (!first_pass && !dirty) |
1689 | return 0; | |
1690 | ||
1691 | valid = 0; | |
6f22bd32 | 1692 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
a2932923 PM |
1693 | valid = 1; |
1694 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && | |
6f22bd32 | 1695 | !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED)) |
a2932923 PM |
1696 | valid = 0; |
1697 | } | |
1698 | if (valid != want_valid) | |
1699 | return 0; | |
1700 | ||
1701 | v = r = 0; | |
1702 | if (valid || dirty) { | |
1703 | /* lock the HPTE so it's stable and read it */ | |
1704 | preempt_disable(); | |
1705 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) | |
1706 | cpu_relax(); | |
6f22bd32 | 1707 | v = be64_to_cpu(hptp[0]); |
abb7c7dd PM |
1708 | hr = be64_to_cpu(hptp[1]); |
1709 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { | |
1710 | v = hpte_new_to_old_v(v, hr); | |
1711 | hr = hpte_new_to_old_r(hr); | |
1712 | } | |
a1b4a0f6 PM |
1713 | |
1714 | /* re-evaluate valid and dirty from synchronized HPTE value */ | |
1715 | valid = !!(v & HPTE_V_VALID); | |
1716 | dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); | |
1717 | ||
1718 | /* Harvest R and C into guest view if necessary */ | |
1719 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); | |
abb7c7dd PM |
1720 | if (valid && (rcbits_unset & hr)) { |
1721 | revp->guest_rpte |= (hr & | |
6f22bd32 | 1722 | (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED; |
a1b4a0f6 PM |
1723 | dirty = 1; |
1724 | } | |
1725 | ||
a2932923 PM |
1726 | if (v & HPTE_V_ABSENT) { |
1727 | v &= ~HPTE_V_ABSENT; | |
1728 | v |= HPTE_V_VALID; | |
a1b4a0f6 | 1729 | valid = 1; |
a2932923 | 1730 | } |
a2932923 PM |
1731 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED)) |
1732 | valid = 0; | |
a1b4a0f6 PM |
1733 | |
1734 | r = revp->guest_rpte; | |
a2932923 PM |
1735 | /* only clear modified if this is the right sort of entry */ |
1736 | if (valid == want_valid && dirty) { | |
1737 | r &= ~HPTE_GR_MODIFIED; | |
1738 | revp->guest_rpte = r; | |
1739 | } | |
a4bd6eb0 | 1740 | unlock_hpte(hptp, be64_to_cpu(hptp[0])); |
a2932923 PM |
1741 | preempt_enable(); |
1742 | if (!(valid == want_valid && (first_pass || dirty))) | |
1743 | ok = 0; | |
1744 | } | |
6f22bd32 AG |
1745 | hpte[0] = cpu_to_be64(v); |
1746 | hpte[1] = cpu_to_be64(r); | |
a2932923 PM |
1747 | return ok; |
1748 | } | |
1749 | ||
1750 | static ssize_t kvm_htab_read(struct file *file, char __user *buf, | |
1751 | size_t count, loff_t *ppos) | |
1752 | { | |
1753 | struct kvm_htab_ctx *ctx = file->private_data; | |
1754 | struct kvm *kvm = ctx->kvm; | |
1755 | struct kvm_get_htab_header hdr; | |
6f22bd32 | 1756 | __be64 *hptp; |
a2932923 PM |
1757 | struct revmap_entry *revp; |
1758 | unsigned long i, nb, nw; | |
1759 | unsigned long __user *lbuf; | |
1760 | struct kvm_get_htab_header __user *hptr; | |
1761 | unsigned long flags; | |
1762 | int first_pass; | |
1763 | unsigned long hpte[2]; | |
1764 | ||
96d4f267 | 1765 | if (!access_ok(buf, count)) |
a2932923 | 1766 | return -EFAULT; |
891f1ebf PM |
1767 | if (kvm_is_radix(kvm)) |
1768 | return 0; | |
a2932923 PM |
1769 | |
1770 | first_pass = ctx->first_pass; | |
1771 | flags = ctx->flags; | |
1772 | ||
1773 | i = ctx->index; | |
3f9d4f5a DG |
1774 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
1775 | revp = kvm->arch.hpt.rev + i; | |
a2932923 PM |
1776 | lbuf = (unsigned long __user *)buf; |
1777 | ||
1778 | nb = 0; | |
1779 | while (nb + sizeof(hdr) + HPTE_SIZE < count) { | |
1780 | /* Initialize header */ | |
1781 | hptr = (struct kvm_get_htab_header __user *)buf; | |
a2932923 PM |
1782 | hdr.n_valid = 0; |
1783 | hdr.n_invalid = 0; | |
1784 | nw = nb; | |
1785 | nb += sizeof(hdr); | |
1786 | lbuf = (unsigned long __user *)(buf + sizeof(hdr)); | |
1787 | ||
1788 | /* Skip uninteresting entries, i.e. clean entries on passes after the first */ | |
1789 | if (!first_pass) { | |
3d089f84 | 1790 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
a1b4a0f6 | 1791 | !hpte_dirty(revp, hptp)) { |
a2932923 PM |
1792 | ++i; |
1793 | hptp += 2; | |
1794 | ++revp; | |
1795 | } | |
1796 | } | |
05dd85f7 | 1797 | hdr.index = i; |
a2932923 PM |
1798 | |
1799 | /* Grab a series of valid entries */ | |
3d089f84 | 1800 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
a2932923 PM |
1801 | hdr.n_valid < 0xffff && |
1802 | nb + HPTE_SIZE < count && | |
1803 | record_hpte(flags, hptp, hpte, revp, 1, first_pass)) { | |
1804 | /* valid entry, write it out */ | |
1805 | ++hdr.n_valid; | |
1806 | if (__put_user(hpte[0], lbuf) || | |
1807 | __put_user(hpte[1], lbuf + 1)) | |
1808 | return -EFAULT; | |
1809 | nb += HPTE_SIZE; | |
1810 | lbuf += 2; | |
1811 | ++i; | |
1812 | hptp += 2; | |
1813 | ++revp; | |
1814 | } | |
1815 | /* Now skip invalid entries while we can */ | |
3d089f84 | 1816 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
a2932923 PM |
1817 | hdr.n_invalid < 0xffff && |
1818 | record_hpte(flags, hptp, hpte, revp, 0, first_pass)) { | |
1819 | /* found an invalid entry */ | |
1820 | ++hdr.n_invalid; | |
1821 | ++i; | |
1822 | hptp += 2; | |
1823 | ++revp; | |
1824 | } | |
1825 | ||
1826 | if (hdr.n_valid || hdr.n_invalid) { | |
1827 | /* write back the header */ | |
1828 | if (__copy_to_user(hptr, &hdr, sizeof(hdr))) | |
1829 | return -EFAULT; | |
1830 | nw = nb; | |
1831 | buf = (char __user *)lbuf; | |
1832 | } else { | |
1833 | nb = nw; | |
1834 | } | |
1835 | ||
1836 | /* Check if we've wrapped around the hash table */ | |
3d089f84 | 1837 | if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { |
a2932923 PM |
1838 | i = 0; |
1839 | ctx->first_pass = 0; | |
1840 | break; | |
1841 | } | |
1842 | } | |
1843 | ||
1844 | ctx->index = i; | |
1845 | ||
1846 | return nb; | |
1847 | } | |
1848 | ||
1849 | static ssize_t kvm_htab_write(struct file *file, const char __user *buf, | |
1850 | size_t count, loff_t *ppos) | |
1851 | { | |
1852 | struct kvm_htab_ctx *ctx = file->private_data; | |
1853 | struct kvm *kvm = ctx->kvm; | |
1854 | struct kvm_get_htab_header hdr; | |
1855 | unsigned long i, j; | |
1856 | unsigned long v, r; | |
1857 | unsigned long __user *lbuf; | |
6f22bd32 | 1858 | __be64 *hptp; |
a2932923 PM |
1859 | unsigned long tmp[2]; |
1860 | ssize_t nb; | |
1861 | long int err, ret; | |
1b151ce4 | 1862 | int mmu_ready; |
ded13fc1 | 1863 | int pshift; |
a2932923 | 1864 | |
96d4f267 | 1865 | if (!access_ok(buf, count)) |
a2932923 | 1866 | return -EFAULT; |
891f1ebf PM |
1867 | if (kvm_is_radix(kvm)) |
1868 | return -EINVAL; | |
a2932923 PM |
1869 | |
1870 | /* lock out vcpus from running while we're doing this */ | |
1871 | mutex_lock(&kvm->lock); | |
1b151ce4 PM |
1872 | mmu_ready = kvm->arch.mmu_ready; |
1873 | if (mmu_ready) { | |
1874 | kvm->arch.mmu_ready = 0; /* temporarily */ | |
1875 | /* order mmu_ready vs. vcpus_running */ | |
a2932923 PM |
1876 | smp_mb(); |
1877 | if (atomic_read(&kvm->arch.vcpus_running)) { | |
1b151ce4 | 1878 | kvm->arch.mmu_ready = 1; |
a2932923 PM |
1879 | mutex_unlock(&kvm->lock); |
1880 | return -EBUSY; | |
1881 | } | |
1882 | } | |
1883 | ||
1884 | err = 0; | |
1885 | for (nb = 0; nb + sizeof(hdr) <= count; ) { | |
1886 | err = -EFAULT; | |
1887 | if (__copy_from_user(&hdr, buf, sizeof(hdr))) | |
1888 | break; | |
1889 | ||
1890 | err = 0; | |
1891 | if (nb + hdr.n_valid * HPTE_SIZE > count) | |
1892 | break; | |
1893 | ||
1894 | nb += sizeof(hdr); | |
1895 | buf += sizeof(hdr); | |
1896 | ||
1897 | err = -EINVAL; | |
1898 | i = hdr.index; | |
3d089f84 DG |
1899 | if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || |
1900 | i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) | |
a2932923 PM |
1901 | break; |
1902 | ||
3f9d4f5a | 1903 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
a2932923 PM |
1904 | lbuf = (unsigned long __user *)buf; |
1905 | for (j = 0; j < hdr.n_valid; ++j) { | |
ffada016 CLG |
1906 | __be64 hpte_v; |
1907 | __be64 hpte_r; | |
1908 | ||
a2932923 | 1909 | err = -EFAULT; |
ffada016 CLG |
1910 | if (__get_user(hpte_v, lbuf) || |
1911 | __get_user(hpte_r, lbuf + 1)) | |
a2932923 | 1912 | goto out; |
ffada016 CLG |
1913 | v = be64_to_cpu(hpte_v); |
1914 | r = be64_to_cpu(hpte_r); | |
a2932923 PM |
1915 | err = -EINVAL; |
1916 | if (!(v & HPTE_V_VALID)) | |
1917 | goto out; | |
ded13fc1 PM |
1918 | pshift = kvmppc_hpte_base_page_shift(v, r); |
1919 | if (pshift <= 0) | |
1920 | goto out; | |
a2932923 PM |
1921 | lbuf += 2; |
1922 | nb += HPTE_SIZE; | |
1923 | ||
6f22bd32 | 1924 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
a2932923 PM |
1925 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
1926 | err = -EIO; | |
1927 | ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, | |
1928 | tmp); | |
1929 | if (ret != H_SUCCESS) { | |
1930 | pr_err("kvm_htab_write ret %ld i=%ld v=%lx " | |
1931 | "r=%lx\n", ret, i, v, r); | |
1932 | goto out; | |
1933 | } | |
1b151ce4 | 1934 | if (!mmu_ready && is_vrma_hpte(v)) { |
ded13fc1 | 1935 | unsigned long senc, lpcr; |
a2932923 | 1936 | |
ded13fc1 | 1937 | senc = slb_pgsize_encoding(1ul << pshift); |
a2932923 PM |
1938 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
1939 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | |
ded13fc1 PM |
1940 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { |
1941 | lpcr = senc << (LPCR_VRMASD_SH - 4); | |
1942 | kvmppc_update_lpcr(kvm, lpcr, | |
1943 | LPCR_VRMASD); | |
1944 | } else { | |
1945 | kvmppc_setup_partition_table(kvm); | |
1946 | } | |
1b151ce4 | 1947 | mmu_ready = 1; |
a2932923 PM |
1948 | } |
1949 | ++i; | |
1950 | hptp += 2; | |
1951 | } | |
1952 | ||
1953 | for (j = 0; j < hdr.n_invalid; ++j) { | |
6f22bd32 | 1954 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
a2932923 PM |
1955 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
1956 | ++i; | |
1957 | hptp += 2; | |
1958 | } | |
1959 | err = 0; | |
1960 | } | |
1961 | ||
1962 | out: | |
1b151ce4 | 1963 | /* Order HPTE updates vs. mmu_ready */ |
a2932923 | 1964 | smp_wmb(); |
1b151ce4 | 1965 | kvm->arch.mmu_ready = mmu_ready; |
a2932923 PM |
1966 | mutex_unlock(&kvm->lock); |
1967 | ||
1968 | if (err) | |
1969 | return err; | |
1970 | return nb; | |
1971 | } | |
1972 | ||
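/*
 * Restore-side sketch (illustration, not part of this file): feed one
 * valid HPTE back through a write-only HTAB fd, obtained with the
 * KVM_GET_HTAB_WRITE flag (see kvm_vm_ioctl_get_htab_fd() below).
 * As the be64_to_cpu() conversions above show, HPTE values travel
 * big-endian on the stream, hence the htobe64() here.
 */
#include <endian.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static ssize_t restore_one_hpte(int htab_fd, uint32_t index,
				uint64_t v, uint64_t r)
{
	struct kvm_get_htab_header hdr = {
		.index = index, .n_valid = 1, .n_invalid = 0,
	};
	uint64_t be[2] = { htobe64(v), htobe64(r) };
	char rec[sizeof(hdr) + sizeof(be)];

	memcpy(rec, &hdr, sizeof(hdr));
	memcpy(rec + sizeof(hdr), be, sizeof(be));
	return write(htab_fd, rec, sizeof(rec));
}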
1973 | static int kvm_htab_release(struct inode *inode, struct file *filp) | |
1974 | { | |
1975 | struct kvm_htab_ctx *ctx = filp->private_data; | |
1976 | ||
1977 | filp->private_data = NULL; | |
1978 | if (!(ctx->flags & KVM_GET_HTAB_WRITE)) | |
1979 | atomic_dec(&ctx->kvm->arch.hpte_mod_interest); | |
1980 | kvm_put_kvm(ctx->kvm); | |
1981 | kfree(ctx); | |
1982 | return 0; | |
1983 | } | |
1984 | ||
75ef9de1 | 1985 | static const struct file_operations kvm_htab_fops = { |
a2932923 PM |
1986 | .read = kvm_htab_read, |
1987 | .write = kvm_htab_write, | |
1988 | .llseek = default_llseek, | |
1989 | .release = kvm_htab_release, | |
1990 | }; | |
1991 | ||
1992 | int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) | |
1993 | { | |
1994 | int ret; | |
1995 | struct kvm_htab_ctx *ctx; | |
1996 | int rwflag; | |
1997 | ||
1998 | /* reject flags we don't recognize */ | |
1999 | if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE)) | |
2000 | return -EINVAL; | |
2001 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | |
2002 | if (!ctx) | |
2003 | return -ENOMEM; | |
2004 | kvm_get_kvm(kvm); | |
2005 | ctx->kvm = kvm; | |
2006 | ctx->index = ghf->start_index; | |
2007 | ctx->flags = ghf->flags; | |
2008 | ctx->first_pass = 1; | |
2009 | ||
2010 | rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; | |
2f84d5ea | 2011 | ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC); |
a2932923 | 2012 | if (ret < 0) { |
43f6b0cf | 2013 | kfree(ctx); |
a2932923 PM |
2014 | kvm_put_kvm(kvm); |
2015 | return ret; | |
2016 | } | |
2017 | ||
2018 | if (rwflag == O_RDONLY) { | |
2019 | mutex_lock(&kvm->slots_lock); | |
2020 | atomic_inc(&kvm->arch.hpte_mod_interest); | |
2021 | /* make sure kvmppc_do_h_enter etc. see the increment */ | |
2022 | synchronize_srcu_expedited(&kvm->srcu); | |
2023 | mutex_unlock(&kvm->slots_lock); | |
2024 | } | |
2025 | ||
2026 | return ret; | |
2027 | } | |
2028 | ||
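/*
 * Userspace sketch of the read protocol implemented above
 * (illustration, not part of this file; assumes vm_fd is an open KVM
 * VM descriptor): obtain a read-only HTAB fd, then walk the
 * header/HPTE records produced by kvm_htab_read().  The first pass
 * streams the whole table; after it wraps, read() returns 0 once
 * nothing further is dirty.
 */
#include <endian.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int dump_htab(int vm_fd)
{
	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
	uint64_t buf[8192];		/* keeps records 8-byte aligned */
	ssize_t nb;
	char *p;
	int fd;

	fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
	if (fd < 0)
		return fd;

	while ((nb = read(fd, buf, sizeof(buf))) > 0) {
		for (p = (char *)buf; p < (char *)buf + nb; ) {
			struct kvm_get_htab_header *hdr = (void *)p;
			uint64_t *hpte = (uint64_t *)(hdr + 1);
			unsigned int i;

			for (i = 0; i < hdr->n_valid; i++, hpte += 2)
				printf("%u: v=%016llx r=%016llx\n",
				       hdr->index + i,
				       (unsigned long long)be64toh(hpte[0]),
				       (unsigned long long)be64toh(hpte[1]));
			p = (char *)hpte;  /* n_invalid entries carry no data */
		}
	}
	close(fd);
	return nb < 0 ? -1 : 0;
}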
e23a808b PM |
2029 | struct debugfs_htab_state { |
2030 | struct kvm *kvm; | |
2031 | struct mutex mutex; | |
2032 | unsigned long hpt_index; | |
2033 | int chars_left; | |
2034 | int buf_index; | |
2035 | char buf[64]; | |
2036 | }; | |
2037 | ||
2038 | static int debugfs_htab_open(struct inode *inode, struct file *file) | |
2039 | { | |
2040 | struct kvm *kvm = inode->i_private; | |
2041 | struct debugfs_htab_state *p; | |
2042 | ||
2043 | p = kzalloc(sizeof(*p), GFP_KERNEL); | |
2044 | if (!p) | |
2045 | return -ENOMEM; | |
2046 | ||
2047 | kvm_get_kvm(kvm); | |
2048 | p->kvm = kvm; | |
2049 | mutex_init(&p->mutex); | |
2050 | file->private_data = p; | |
2051 | ||
2052 | return nonseekable_open(inode, file); | |
2053 | } | |
2054 | ||
2055 | static int debugfs_htab_release(struct inode *inode, struct file *file) | |
2056 | { | |
2057 | struct debugfs_htab_state *p = file->private_data; | |
2058 | ||
2059 | kvm_put_kvm(p->kvm); | |
2060 | kfree(p); | |
2061 | return 0; | |
2062 | } | |
2063 | ||
2064 | static ssize_t debugfs_htab_read(struct file *file, char __user *buf, | |
2065 | size_t len, loff_t *ppos) | |
2066 | { | |
2067 | struct debugfs_htab_state *p = file->private_data; | |
2068 | ssize_t ret, r; | |
2069 | unsigned long i, n; | |
2070 | unsigned long v, hr, gr; | |
2071 | struct kvm *kvm; | |
2072 | __be64 *hptp; | |
2073 | ||
891f1ebf PM |
2074 | kvm = p->kvm; |
2075 | if (kvm_is_radix(kvm)) | |
2076 | return 0; | |
2077 | ||
e23a808b PM |
2078 | ret = mutex_lock_interruptible(&p->mutex); |
2079 | if (ret) | |
2080 | return ret; | |
2081 | ||
2082 | if (p->chars_left) { | |
2083 | n = p->chars_left; | |
2084 | if (n > len) | |
2085 | n = len; | |
2086 | r = copy_to_user(buf, p->buf + p->buf_index, n); | |
2087 | n -= r; | |
2088 | p->chars_left -= n; | |
2089 | p->buf_index += n; | |
2090 | buf += n; | |
2091 | len -= n; | |
2092 | ret = n; | |
2093 | if (r) { | |
2094 | if (!n) | |
2095 | ret = -EFAULT; | |
2096 | goto out; | |
2097 | } | |
2098 | } | |
2099 | ||
e23a808b | 2100 | i = p->hpt_index; |
3f9d4f5a | 2101 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
3d089f84 DG |
2102 | for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); |
2103 | ++i, hptp += 2) { | |
e23a808b PM |
2104 | if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))) |
2105 | continue; | |
2106 | ||
2107 | /* lock the HPTE so it's stable and read it */ | |
2108 | preempt_disable(); | |
2109 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) | |
2110 | cpu_relax(); | |
2111 | v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK; | |
2112 | hr = be64_to_cpu(hptp[1]); | |
3f9d4f5a | 2113 | gr = kvm->arch.hpt.rev[i].guest_rpte; |
e23a808b PM |
2114 | unlock_hpte(hptp, v); |
2115 | preempt_enable(); | |
2116 | ||
2117 | if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) | |
2118 | continue; | |
2119 | ||
2120 | n = scnprintf(p->buf, sizeof(p->buf), | |
2121 | "%6lx %.16lx %.16lx %.16lx\n", | |
2122 | i, v, hr, gr); | |
2123 | p->chars_left = n; | |
2124 | if (n > len) | |
2125 | n = len; | |
2126 | r = copy_to_user(buf, p->buf, n); | |
2127 | n -= r; | |
2128 | p->chars_left -= n; | |
2129 | p->buf_index = n; | |
2130 | buf += n; | |
2131 | len -= n; | |
2132 | ret += n; | |
2133 | if (r) { | |
2134 | if (!ret) | |
2135 | ret = -EFAULT; | |
2136 | goto out; | |
2137 | } | |
2138 | } | |
2139 | p->hpt_index = i; | |
2140 | ||
2141 | out: | |
2142 | mutex_unlock(&p->mutex); | |
2143 | return ret; | |
2144 | } | |
2145 | ||
025c9511 | 2146 | static ssize_t debugfs_htab_write(struct file *file, const char __user *buf, |
e23a808b PM |
2147 | size_t len, loff_t *ppos) |
2148 | { | |
2149 | return -EACCES; | |
2150 | } | |
2151 | ||
2152 | static const struct file_operations debugfs_htab_fops = { | |
2153 | .owner = THIS_MODULE, | |
2154 | .open = debugfs_htab_open, | |
2155 | .release = debugfs_htab_release, | |
2156 | .read = debugfs_htab_read, | |
2157 | .write = debugfs_htab_write, | |
2158 | .llseek = generic_file_llseek, | |
2159 | }; | |
2160 | ||
2161 | void kvmppc_mmu_debugfs_init(struct kvm *kvm) | |
2162 | { | |
2163 | kvm->arch.htab_dentry = debugfs_create_file("htab", 0400, | |
2164 | kvm->arch.debugfs_dir, kvm, | |
2165 | &debugfs_htab_fops); | |
2166 | } | |
2167 | ||
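/*
 * Each line the "htab" debugfs file above emits is formatted
 * "%6lx %.16lx %.16lx %.16lx": index, HPTE dword 0, HPTE dword 1,
 * and the guest view of the second dword.  A minimal parser
 * (illustration only; the per-VM debugfs directory is typically
 * /sys/kernel/debug/kvm/<pid>-<vm_fd>/, but treat that path as an
 * assumption):
 */
#include <stdio.h>

static int parse_htab_line(const char *line)
{
	unsigned long i, v, hr, gr;

	if (sscanf(line, "%lx %lx %lx %lx", &i, &v, &hr, &gr) != 4)
		return -1;

	/* HPTE_V_VALID is the low bit of the first dword */
	printf("hpte %#lx: %s\n", i, (v & 1ul) ? "valid" : "absent");
	return 0;
}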
de56a948 PM |
2168 | void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) |
2169 | { | |
2170 | struct kvmppc_mmu *mmu = &vcpu->arch.mmu; | |
2171 | ||
c17b98cf | 2172 | vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ |
de56a948 | 2173 | |
18c3640c | 2174 | mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; |
de56a948 PM |
2175 | mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr; |
2176 | ||
2177 | vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; | |
2178 | } |