/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER	18

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);
static void kvmppc_rmap_reset(struct kvm *kvm);

long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;
	long order = kvm_hpt_order;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	/*
	 * If the user wants a different size from default,
	 * try first to allocate it from the kernel page allocator.
	 */
	hpt = 0;
	if (order != kvm_hpt_order) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
		li = kvm_alloc_hpt();
		if (li) {
			hpt = (ulong)li->base_virt;
			kvm->arch.hpt_li = li;
			order = kvm_hpt_order;
		}
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
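	/*
	 * For example, at the minimum order of 18 the HPT is 2^18 bytes
	 * (256kB): 2^18 / 2^4 = 16384 HPTEs, grouped into 2^18 / 2^7 =
	 * 2048 HPTEGs of 8 HPTEs each, so hpt_mask = 2047 reduces a hash
	 * value to a valid HPTEG index.
	 */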

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		/*
		 * Set the whole last_vcpu array to an invalid vcpu number.
		 * This ensures that each vcpu will flush its TLB on next entry.
		 */
		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}
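
/*
 * Taken together, the two helpers above give, for the sizes handled here:
 *   4k:   first dword 0,            second dword 0
 *   64k:  first dword HPTE_V_LARGE, second dword 0x1000
 *   16M:  first dword HPTE_V_LARGE, second dword 0
 * i.e. HPTE_V_LARGE marks any large page, and the 0x1000 "LP" bit in the
 * second dword distinguishes 64k from 16M.
 */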

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the memslot->arch.slot_phys[] array.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = memslot->arch.slot_phys;
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}

/*
 * We come here on an H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			     long pte_index, unsigned long pteh,
			     unsigned long ptel)
{
	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
					  pteh, ptel, &vcpu->arch.gpr[4]);
}
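
/*
 * Note that &vcpu->arch.gpr[4] is passed as the place to store the index
 * of the inserted entry: hypercall return values other than the status
 * (which goes in r3) are delivered to the guest in r4 and up.
 */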

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}
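
/*
 * For example, with a 16MB page ra_mask is 0xffffff, so this returns the
 * 16MB-aligned real page number from the second HPTE dword with the low
 * 24 bits of the effective address appended.
 */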

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}
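
/*
 * The masks above reflect the PowerPC opcode layout: for D-form
 * loads/stores, bit 0x10000000 of the major opcode separates stores from
 * loads (e.g. lwz is opcode 32, stw is opcode 36), and for major opcode
 * 31 the 0x100 bit plays the same role in the extended opcode (e.g. lwzx
 * is 23, stwx is 151 = 23 + 128).
 */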

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
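	/*
	 * This read of mmu_notifier_seq pairs with mmu_notifier_retry()
	 * below: if an MMU notifier invalidation starts after this point,
	 * the sequence count will have changed by the time we hold the
	 * rmap lock, and we retry the fault instead of inserting a stale
	 * translation.
	 */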

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

static void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm->memslots;
	kvm_for_each_memslot(memslot, slots) {
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

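/*
 * When KVMPPC_RMAP_PRESENT is set, an rmap word holds the index of one
 * HPTE mapping the page in its KVMPPC_RMAP_INDEX field; any further
 * HPTEs for the same page are chained through the forw/back fields of
 * their revmap entries, forming a circular doubly-linked list.
 */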
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			if (kvm->arch.using_mmu_notifiers)
				hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			rev[i].guest_rpte = ptel | rcbits;
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
	return 0;
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long *rmapp;
	unsigned long gfn;
	unsigned long n;

	rmapp = memslot->arch.rmap;
	gfn = memslot->base_gfn;
	for (n = memslot->npages; n; --n) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, rmapp, gfn);
		++rmapp;
		++gfn;
	}
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			rev[i].guest_rpte |= HPTE_R_R;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
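
/*
 * A host pte change is handled by simply unmapping the page on the KVM
 * side; the next guest access faults and picks up the new translation
 * in kvmppc_book3s_hv_page_fault().
 */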

static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			rev[i].guest_rpte |= HPTE_R_C;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long *map)
{
	unsigned long i;
	unsigned long *rmapp;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp) && map)
			__set_bit_le(i, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, psize, offset;
	unsigned long pa;
	unsigned long *physp;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			goto err;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				goto err;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			goto err;
		page = pages[0];
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	put_page(page);
}

/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count is non-zero
 * in the header written to the stream, the kernel will make
 * sure that that many HPTEs are invalid, and invalidate them
 * if not.
 */

struct kvm_htab_ctx {
	unsigned long index;
	unsigned long flags;
	struct kvm *kvm;
	int first_pass;
};

#define HPTE_SIZE	(2 * sizeof(unsigned long))
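
/*
 * The stream read from or written to the file descriptor is a sequence
 * of records, each of the form
 *
 *	struct kvm_get_htab_header hdr;		(index, n_valid, n_invalid)
 *	__u64 hpte[2 * hdr.n_valid];		(n_valid HPTEs, 16 bytes each)
 *
 * meaning "n_valid valid entries starting at index, followed by
 * n_invalid invalid entries".
 */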

static long record_hpte(unsigned long flags, unsigned long *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(hptp[0] & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		return 0;

	v = r = 0;
	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = hptp[0];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;
		r = revp->guest_rpte | (hptp[1] & (HPTE_R_R | HPTE_R_C));
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
		hptp[0] &= ~HPTE_V_HVLOCK;
		preempt_enable();
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = v;
	hpte[1] = r;
	return ok;
}

static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
	revp = kvm->arch.revmap + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.index = i;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
			while (i < kvm->arch.hpt_npte &&
			       !(revp->guest_rpte & HPTE_GR_MODIFIED)) {
				++i;
				hptp += 2;
				++revp;
			}
		}

		/* Grab a series of valid entries */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvm->arch.hpt_npte) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}

static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	unsigned long *hptp;
	unsigned long tmp[2];
	ssize_t nb;
	long int err, ret;
	int rma_setup;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->lock);
	rma_setup = kvm->arch.rma_setup_done;
	if (rma_setup) {
		kvm->arch.rma_setup_done = 0;	/* temporarily */
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvm->arch.hpt_npte ||
		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
			break;

		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			err = -EFAULT;
			if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
				goto out;
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
			if (!rma_setup && is_vrma_hpte(v)) {
				unsigned long psize = hpte_page_size(v, r);
				unsigned long senc = slb_pgsize_encoding(psize);
				unsigned long lpcr;

				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
				lpcr |= senc << (LPCR_VRMASD_SH - 4);
				kvm->arch.lpcr = lpcr;
				rma_setup = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

 out:
	/* Order HPTE updates vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = rma_setup;
	mutex_unlock(&kvm->lock);

	if (err)
		return err;
	return nb;
}

static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}

static struct file_operations kvm_htab_fops = {
	.read		= kvm_htab_read,
	.write		= kvm_htab_write,
	.llseek		= default_llseek,
	.release	= kvm_htab_release,
};

int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag);
	if (ret < 0) {
		kvm_put_kvm(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}
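
/*
 * A minimal sketch of how userspace might drive this for migration,
 * assuming the usual KVM_PPC_GET_HTAB_FD ioctl wiring and omitting
 * error handling (send_to_destination stands in for whatever transport
 * the migration code provides):
 *
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	int fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
 *	char buf[65536];
 *	ssize_t n;
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		send_to_destination(buf, n);
 *
 * The first pass returns the whole HPT; because record_hpte() clears
 * HPTE_GR_MODIFIED as entries are read, later passes return only
 * entries that have changed since they were last read.
 */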

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}