| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* |
| 3 | * |
| 4 | * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> |
| 5 | */ |
| 6 | |
| 7 | #include <linux/types.h> |
| 8 | #include <linux/string.h> |
| 9 | #include <linux/kvm.h> |
| 10 | #include <linux/kvm_host.h> |
| 11 | #include <linux/highmem.h> |
| 12 | #include <linux/gfp.h> |
| 13 | #include <linux/slab.h> |
| 14 | #include <linux/hugetlb.h> |
| 15 | #include <linux/vmalloc.h> |
| 16 | #include <linux/srcu.h> |
| 17 | #include <linux/anon_inodes.h> |
| 18 | #include <linux/file.h> |
| 19 | #include <linux/debugfs.h> |
| 20 | |
| 21 | #include <asm/kvm_ppc.h> |
| 22 | #include <asm/kvm_book3s.h> |
| 23 | #include <asm/book3s/64/mmu-hash.h> |
| 24 | #include <asm/hvcall.h> |
| 25 | #include <asm/synch.h> |
| 26 | #include <asm/ppc-opcode.h> |
| 27 | #include <asm/cputable.h> |
| 28 | #include <asm/pte-walk.h> |
| 29 | |
| 30 | #include "book3s.h" |
| 31 | #include "book3s_hv.h" |
| 32 | #include "trace_hv.h" |
| 33 | |
| 34 | //#define DEBUG_RESIZE_HPT 1 |
| 35 | |
| 36 | #ifdef DEBUG_RESIZE_HPT |
| 37 | #define resize_hpt_debug(resize, ...) \ |
| 38 | do { \ |
| 39 | printk(KERN_DEBUG "RESIZE HPT %p: ", resize); \ |
| 40 | printk(__VA_ARGS__); \ |
| 41 | } while (0) |
| 42 | #else |
| 43 | #define resize_hpt_debug(resize, ...) \ |
| 44 | do { } while (0) |
| 45 | #endif |
| 46 | |
| 47 | static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, |
| 48 | long pte_index, unsigned long pteh, |
| 49 | unsigned long ptel, unsigned long *pte_idx_ret); |
| 50 | |
| 51 | struct kvm_resize_hpt { |
| 52 | /* These fields are read-only after init */
| 53 | struct kvm *kvm; |
| 54 | struct work_struct work; |
| 55 | u32 order; |
| 56 | |
| 57 | /* These fields protected by kvm->arch.mmu_setup_lock */ |
| 58 | |
| 59 | /* Possible values and their usage: |
| 60 | * <0 an error occurred during allocation, |
| 61 | * -EBUSY allocation is in progress,
| 62 | * 0 allocation completed successfully.
| 63 | */ |
| 64 | int error; |
| 65 | |
| 66 | /* Private to the work thread, until error != -EBUSY, |
| 67 | * then protected by kvm->arch.mmu_setup_lock. |
| 68 | */ |
| 69 | struct kvm_hpt_info hpt; |
| 70 | }; |
| 71 | |
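|  | /*
|  | * Allocate a hashed page table of 2^order bytes plus one reverse-map
|  | * (revmap) entry per HPTE. We try the CMA pool first and fall back to
|  | * the page allocator; info->cma records which one succeeded so that
|  | * kvmppc_free_hpt() can release the memory the same way.
|  | */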
| 72 | int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order) |
| 73 | { |
| 74 | unsigned long hpt = 0; |
| 75 | int cma = 0; |
| 76 | struct page *page = NULL; |
| 77 | struct revmap_entry *rev; |
| 78 | unsigned long npte; |
| 79 | |
| 80 | if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER)) |
| 81 | return -EINVAL; |
| 82 | |
| 83 | page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT)); |
| 84 | if (page) { |
| 85 | hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); |
| 86 | memset((void *)hpt, 0, (1ul << order)); |
| 87 | cma = 1; |
| 88 | } |
| 89 | |
| 90 | if (!hpt) |
| 91 | hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL |
| 92 | |__GFP_NOWARN, order - PAGE_SHIFT); |
| 93 | |
| 94 | if (!hpt) |
| 95 | return -ENOMEM; |
| 96 | |
| 97 | /* HPTEs are 2**4 bytes long */ |
| 98 | npte = 1ul << (order - 4); |
| 99 | |
| 100 | /* Allocate reverse map array */ |
| 101 | rev = vmalloc(array_size(npte, sizeof(struct revmap_entry))); |
| 102 | if (!rev) { |
| 103 | if (cma) |
| 104 | kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT)); |
| 105 | else |
| 106 | free_pages(hpt, order - PAGE_SHIFT); |
| 107 | return -ENOMEM; |
| 108 | } |
| 109 | |
| 110 | info->order = order; |
| 111 | info->virt = hpt; |
| 112 | info->cma = cma; |
| 113 | info->rev = rev; |
| 114 | |
| 115 | return 0; |
| 116 | } |
| 117 | |
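|  | /*
|  | * Install an allocated HPT as the guest's hash table. The SDR1-style
|  | * value combines the real address of the HPT with the HTABSIZE
|  | * encoding (order - 18, i.e. log2 of the size relative to 256kB).
|  | */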
| 118 | void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) |
| 119 | { |
| 120 | atomic64_set(&kvm->arch.mmio_update, 0); |
| 121 | kvm->arch.hpt = *info; |
| 122 | kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); |
| 123 | |
| 124 | pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n", |
| 125 | info->virt, (long)info->order, kvm->arch.lpid); |
| 126 | } |
| 127 | |
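|  | /*
|  | * Allocate or reset the guest's HPT to the requested order. If an HPT
|  | * of that size already exists it is simply cleared; otherwise any old
|  | * HPT is freed and a new one allocated. Fails with -EBUSY if vcpus
|  | * are currently running with the MMU marked ready.
|  | */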
| 128 | int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) |
| 129 | { |
| 130 | int err = -EBUSY; |
| 131 | struct kvm_hpt_info info; |
| 132 | |
| 133 | mutex_lock(&kvm->arch.mmu_setup_lock); |
| 134 | if (kvm->arch.mmu_ready) { |
| 135 | kvm->arch.mmu_ready = 0; |
| 136 | /* order mmu_ready vs. vcpus_running */ |
| 137 | smp_mb(); |
| 138 | if (atomic_read(&kvm->arch.vcpus_running)) { |
| 139 | kvm->arch.mmu_ready = 1; |
| 140 | goto out; |
| 141 | } |
| 142 | } |
| 143 | if (kvm_is_radix(kvm)) { |
| 144 | err = kvmppc_switch_mmu_to_hpt(kvm); |
| 145 | if (err) |
| 146 | goto out; |
| 147 | } |
| 148 | |
| 149 | if (kvm->arch.hpt.order == order) { |
| 150 | /* We already have a suitable HPT */ |
| 151 | |
| 152 | /* Set the entire HPT to 0, i.e. invalid HPTEs */ |
| 153 | memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); |
| 154 | /* |
| 155 | * Reset all the reverse-mapping chains for all memslots |
| 156 | */ |
| 157 | kvmppc_rmap_reset(kvm); |
| 158 | err = 0; |
| 159 | goto out; |
| 160 | } |
| 161 | |
| 162 | if (kvm->arch.hpt.virt) { |
| 163 | kvmppc_free_hpt(&kvm->arch.hpt); |
| 164 | kvmppc_rmap_reset(kvm); |
| 165 | } |
| 166 | |
| 167 | err = kvmppc_allocate_hpt(&info, order); |
| 168 | if (err < 0) |
| 169 | goto out; |
| 170 | kvmppc_set_hpt(kvm, &info); |
| 171 | |
| 172 | out: |
| 173 | if (err == 0) |
| 174 | /* Ensure that each vcpu will flush its TLB on next entry. */ |
| 175 | cpumask_setall(&kvm->arch.need_tlb_flush); |
| 176 | |
| 177 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
| 178 | return err; |
| 179 | } |
| 180 | |
| 181 | void kvmppc_free_hpt(struct kvm_hpt_info *info) |
| 182 | { |
| 183 | vfree(info->rev); |
| 184 | info->rev = NULL; |
| 185 | if (info->cma) |
| 186 | kvm_free_hpt_cma(virt_to_page((void *)info->virt), |
| 187 | 1 << (info->order - PAGE_SHIFT)); |
| 188 | else if (info->virt) |
| 189 | free_pages(info->virt, info->order - PAGE_SHIFT); |
| 190 | info->virt = 0; |
| 191 | info->order = 0; |
| 192 | } |
| 193 | |
| 194 | /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */ |
| 195 | static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize) |
| 196 | { |
| 197 | return (pgsize > 0x1000) ? HPTE_V_LARGE : 0; |
| 198 | } |
| 199 | |
| 200 | /* Bits in second HPTE dword for pagesize 4k, 64k or 16M */ |
| 201 | static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize) |
| 202 | { |
| 203 | return (pgsize == 0x10000) ? 0x1000 : 0; |
| 204 | } |
| 205 | |
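|  | /*
|  | * Populate the HPT with bolted entries mapping the VRMA (virtual
|  | * real mode area), used when the guest runs with MSR[IR/DR] = 0.
|  | * One HPTE is created per page of size 2^porder, using entry 7 of
|  | * each HPTE group since the table is assumed to be empty here.
|  | */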
| 206 | void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, |
| 207 | unsigned long porder) |
| 208 | { |
| 209 | unsigned long i; |
| 210 | unsigned long npages; |
| 211 | unsigned long hp_v, hp_r; |
| 212 | unsigned long addr, hash; |
| 213 | unsigned long psize; |
| 214 | unsigned long hp0, hp1; |
| 215 | unsigned long idx_ret; |
| 216 | long ret; |
| 217 | struct kvm *kvm = vcpu->kvm; |
| 218 | |
| 219 | psize = 1ul << porder; |
| 220 | npages = memslot->npages >> (porder - PAGE_SHIFT); |
| 221 | |
| 222 | /* VRMA can't be > 1TB */ |
| 223 | if (npages > 1ul << (40 - porder)) |
| 224 | npages = 1ul << (40 - porder); |
| 225 | /* Can't use more than 1 HPTE per HPTEG */ |
| 226 | if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) |
| 227 | npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; |
| 228 | |
| 229 | hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | |
| 230 | HPTE_V_BOLTED | hpte0_pgsize_encoding(psize); |
| 231 | hp1 = hpte1_pgsize_encoding(psize) | |
| 232 | HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX; |
| 233 | |
| 234 | for (i = 0; i < npages; ++i) { |
| 235 | addr = i << porder; |
| 236 | /* can't use hpt_hash since va > 64 bits */ |
| 237 | hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) |
| 238 | & kvmppc_hpt_mask(&kvm->arch.hpt); |
| 239 | /* |
| 240 | * We assume that the hash table is empty and no |
| 241 | * vcpus are using it at this stage. Since we create |
| 242 | * at most one HPTE per HPTEG, we just assume entry 7 |
| 243 | * is available and use it. |
| 244 | */ |
| 245 | hash = (hash << 3) + 7; |
| 246 | hp_v = hp0 | ((addr >> 16) & ~0x7fUL); |
| 247 | hp_r = hp1 | addr; |
| 248 | ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r, |
| 249 | &idx_ret); |
| 250 | if (ret != H_SUCCESS) { |
| 251 | pr_err("KVM: map_vrma at %lx failed, ret=%ld\n", |
| 252 | addr, ret); |
| 253 | break; |
| 254 | } |
| 255 | } |
| 256 | } |
| 257 | |
| 258 | int kvmppc_mmu_hv_init(void) |
| 259 | { |
| 260 | unsigned long nr_lpids; |
| 261 | |
| 262 | if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) |
| 263 | return -EINVAL; |
| 264 | |
| 265 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
| 266 | if (WARN_ON(mfspr(SPRN_LPID) != 0)) |
| 267 | return -EINVAL; |
| 268 | nr_lpids = 1UL << mmu_lpid_bits; |
| 269 | } else { |
| 270 | nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT; |
| 271 | } |
| 272 | |
| 273 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 274 | /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */ |
| 275 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
| 276 | WARN_ON(nr_lpids != 1UL << 12); |
| 277 | else |
| 278 | WARN_ON(nr_lpids != 1UL << 10); |
| 279 | |
| 280 | /* |
| 281 | * Reserve the last implemented LPID for use in partition
| 282 | * switching on POWER7 and POWER8.
| 283 | */ |
| 284 | nr_lpids -= 1; |
| 285 | } |
| 286 | |
| 287 | kvmppc_init_lpid(nr_lpids); |
| 288 | |
| 289 | return 0; |
| 290 | } |
| 291 | |
| 292 | static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, |
| 293 | long pte_index, unsigned long pteh, |
| 294 | unsigned long ptel, unsigned long *pte_idx_ret) |
| 295 | { |
| 296 | long ret; |
| 297 | |
| 298 | preempt_disable(); |
| 299 | ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, |
| 300 | kvm->mm->pgd, false, pte_idx_ret); |
| 301 | preempt_enable(); |
| 302 | if (ret == H_TOO_HARD) { |
| 303 | /* this can't happen */ |
| 304 | pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n"); |
| 305 | ret = H_RESOURCE; /* or something */ |
| 306 | } |
| 307 | return ret; |
| 308 | |
| 309 | } |
| 310 | |
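|  | /*
|  | * Find the valid guest SLB entry, if any, whose ESID matches the
|  | * given effective address (handling both 256MB and 1TB segments).
|  | */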
| 311 | static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, |
| 312 | gva_t eaddr) |
| 313 | { |
| 314 | u64 mask; |
| 315 | int i; |
| 316 | |
| 317 | for (i = 0; i < vcpu->arch.slb_nr; i++) { |
| 318 | if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) |
| 319 | continue; |
| 320 | |
| 321 | if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) |
| 322 | mask = ESID_MASK_1T; |
| 323 | else |
| 324 | mask = ESID_MASK; |
| 325 | |
| 326 | if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) |
| 327 | return &vcpu->arch.slb[i]; |
| 328 | } |
| 329 | return NULL; |
| 330 | } |
| 331 | |
| 332 | static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r, |
| 333 | unsigned long ea) |
| 334 | { |
| 335 | unsigned long ra_mask; |
| 336 | |
| 337 | ra_mask = kvmppc_actual_pgsz(v, r) - 1; |
| 338 | return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask); |
| 339 | } |
| 340 | |
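|  | /*
|  | * Translate a guest effective address to a guest real address and
|  | * access permissions, using the guest's SLB entries and its view of
|  | * the HPT (the guest_rpte values kept in the revmap array).
|  | */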
| 341 | static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, |
| 342 | struct kvmppc_pte *gpte, bool data, bool iswrite) |
| 343 | { |
| 344 | struct kvm *kvm = vcpu->kvm; |
| 345 | struct kvmppc_slb *slbe; |
| 346 | unsigned long slb_v; |
| 347 | unsigned long pp, key; |
| 348 | unsigned long v, orig_v, gr; |
| 349 | __be64 *hptep; |
| 350 | long int index; |
| 351 | int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR); |
| 352 | |
| 353 | if (kvm_is_radix(vcpu->kvm)) |
| 354 | return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite); |
| 355 | |
| 356 | /* Get SLB entry */ |
| 357 | if (virtmode) { |
| 358 | slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr); |
| 359 | if (!slbe) |
| 360 | return -EINVAL; |
| 361 | slb_v = slbe->origv; |
| 362 | } else { |
| 363 | /* real mode access */ |
| 364 | slb_v = vcpu->kvm->arch.vrma_slb_v; |
| 365 | } |
| 366 | |
| 367 | preempt_disable(); |
| 368 | /* Find the HPTE in the hash table */ |
| 369 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, |
| 370 | HPTE_V_VALID | HPTE_V_ABSENT); |
| 371 | if (index < 0) { |
| 372 | preempt_enable(); |
| 373 | return -ENOENT; |
| 374 | } |
| 375 | hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); |
| 376 | v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; |
| 377 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
| 378 | v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1])); |
| 379 | gr = kvm->arch.hpt.rev[index].guest_rpte; |
| 380 | |
| 381 | unlock_hpte(hptep, orig_v); |
| 382 | preempt_enable(); |
| 383 | |
| 384 | gpte->eaddr = eaddr; |
| 385 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); |
| 386 | |
| 387 | /* Get PP bits and key for permission check */ |
| 388 | pp = gr & (HPTE_R_PP0 | HPTE_R_PP); |
| 389 | key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; |
| 390 | key &= slb_v; |
| 391 | |
| 392 | /* Calculate permissions */ |
| 393 | gpte->may_read = hpte_read_permission(pp, key); |
| 394 | gpte->may_write = hpte_write_permission(pp, key); |
| 395 | gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); |
| 396 | |
| 397 | /* Storage key permission check for POWER7 */ |
| 398 | if (data && virtmode) { |
| 399 | int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); |
| 400 | if (amrfield & 1) |
| 401 | gpte->may_read = 0; |
| 402 | if (amrfield & 2) |
| 403 | gpte->may_write = 0; |
| 404 | } |
| 405 | |
| 406 | /* Get the guest physical address */ |
| 407 | gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr); |
| 408 | return 0; |
| 409 | } |
| 410 | |
| 411 | /* |
| 412 | * Quick test for whether an instruction is a load or a store. |
| 413 | * If the instruction is a load or a store, then this will indicate |
| 414 | * which it is, at least on server processors. (Embedded processors |
| 415 | * have some external PID instructions that don't follow the rule |
| 416 | * embodied here.) If the instruction isn't a load or store, then |
| 417 | * this doesn't return anything useful. |
| 418 | */ |
| 419 | static int instruction_is_store(ppc_inst_t instr) |
| 420 | { |
| 421 | unsigned int mask; |
| 422 | unsigned int suffix; |
| 423 | |
| 424 | mask = 0x10000000; |
| 425 | suffix = ppc_inst_val(instr); |
| 426 | if (ppc_inst_prefixed(instr)) |
| 427 | suffix = ppc_inst_suffix(instr); |
| 428 | else if ((suffix & 0xfc000000) == 0x7c000000) |
| 429 | mask = 0x100; /* major opcode 31 */ |
| 430 | return (suffix & mask) != 0; |
| 431 | } |
| 432 | |
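|  | /*
|  | * Emulate an MMIO access: try the fast-MMIO path for stores first,
|  | * then fetch the faulting instruction and hand it to the generic
|  | * MMIO emulation code, re-entering the guest if anything looks
|  | * inconsistent with the original fault.
|  | */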
| 433 | int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, |
| 434 | unsigned long gpa, gva_t ea, int is_store) |
| 435 | { |
| 436 | ppc_inst_t last_inst; |
| 437 | bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED); |
| 438 | |
| 439 | /* |
| 440 | * Fast path - check if the guest physical address corresponds to a |
| 441 | * device on the FAST_MMIO_BUS; if so, we can avoid loading the
| 442 | * instruction altogether and just handle it and return.
| 443 | */ |
| 444 | if (is_store) { |
| 445 | int idx, ret; |
| 446 | |
| 447 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 448 | ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0, |
| 449 | NULL); |
| 450 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 451 | if (!ret) { |
| 452 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4)); |
| 453 | return RESUME_GUEST; |
| 454 | } |
| 455 | } |
| 456 | |
| 457 | /* |
| 458 | * If we fail, we just return to the guest and try executing it again. |
| 459 | */ |
| 460 | if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != |
| 461 | EMULATE_DONE) |
| 462 | return RESUME_GUEST; |
| 463 | |
| 464 | /* |
| 465 | * WARNING: We do not know for sure whether the instruction we just |
| 466 | * read from memory is the same that caused the fault in the first |
| 467 | * place. |
| 468 | * |
| 469 | * If the fault is prefixed but the instruction is not or vice |
| 470 | * versa, try again so that we don't advance pc the wrong amount. |
| 471 | */ |
| 472 | if (ppc_inst_prefixed(last_inst) != is_prefixed) |
| 473 | return RESUME_GUEST; |
| 474 | |
| 475 | /* |
| 476 | * If the instruction we read is neither a load nor a store,
| 477 | * then it can't access memory, so we don't need to worry about |
| 478 | * enforcing access permissions. So, assuming it is a load or |
| 479 | * store, we just check that its direction (load or store) is |
| 480 | * consistent with the original fault, since that's what we |
| 481 | * checked the access permissions against. If there is a mismatch |
| 482 | * we just return and retry the instruction. |
| 483 | */ |
| 484 | |
| 485 | if (instruction_is_store(last_inst) != !!is_store) |
| 486 | return RESUME_GUEST; |
| 487 | |
| 488 | /* |
| 489 | * Emulated accesses are emulated by looking at the hash for |
| 490 | * translation once, then performing the access later. The |
| 491 | * translation could be invalidated in the meantime, at which
| 492 | * point performing the subsequent memory access on the old
| 493 | * physical address could possibly be a security hole for the |
| 494 | * guest (but not the host). |
| 495 | * |
| 496 | * This is less of an issue for MMIO stores since they aren't |
| 497 | * globally visible. It could be an issue for MMIO loads to |
| 498 | * a certain extent but we'll ignore it for now. |
| 499 | */ |
| 500 | |
| 501 | vcpu->arch.paddr_accessed = gpa; |
| 502 | vcpu->arch.vaddr_accessed = ea; |
| 503 | return kvmppc_emulate_mmio(vcpu); |
| 504 | } |
| 505 | |
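|  | /*
|  | * Handle a hash-MMU guest page fault that real-mode code could not
|  | * resolve: translate the guest physical address, get the host page,
|  | * and turn the ABSENT HPTE into a VALID one pointing at the host
|  | * physical address (or emulate MMIO if there is no memslot).
|  | */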
| 506 | int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, |
| 507 | unsigned long ea, unsigned long dsisr) |
| 508 | { |
| 509 | struct kvm *kvm = vcpu->kvm; |
| 510 | unsigned long hpte[3], r; |
| 511 | unsigned long hnow_v, hnow_r; |
| 512 | __be64 *hptep; |
| 513 | unsigned long mmu_seq, psize, pte_size; |
| 514 | unsigned long gpa_base, gfn_base; |
| 515 | unsigned long gpa, gfn, hva, pfn, hpa; |
| 516 | struct kvm_memory_slot *memslot; |
| 517 | unsigned long *rmap; |
| 518 | struct revmap_entry *rev; |
| 519 | struct page *page; |
| 520 | long index, ret; |
| 521 | bool is_ci; |
| 522 | bool writing, write_ok; |
| 523 | unsigned int shift; |
| 524 | unsigned long rcbits; |
| 525 | long mmio_update; |
| 526 | pte_t pte, *ptep; |
| 527 | |
| 528 | if (kvm_is_radix(kvm)) |
| 529 | return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr); |
| 530 | |
| 531 | /* |
| 532 | * Real-mode code has already searched the HPT and found the |
| 533 | * entry we're interested in. Lock the entry and check that |
| 534 | * it hasn't changed. If it has, just return and re-execute the |
| 535 | * instruction. |
| 536 | */ |
| 537 | if (ea != vcpu->arch.pgfault_addr) |
| 538 | return RESUME_GUEST; |
| 539 | |
| 540 | if (vcpu->arch.pgfault_cache) { |
| 541 | mmio_update = atomic64_read(&kvm->arch.mmio_update); |
| 542 | if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) { |
| 543 | r = vcpu->arch.pgfault_cache->rpte; |
| 544 | psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0], |
| 545 | r); |
| 546 | gpa_base = r & HPTE_R_RPN & ~(psize - 1); |
| 547 | gfn_base = gpa_base >> PAGE_SHIFT; |
| 548 | gpa = gpa_base | (ea & (psize - 1)); |
| 549 | return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, |
| 550 | dsisr & DSISR_ISSTORE); |
| 551 | } |
| 552 | } |
| 553 | index = vcpu->arch.pgfault_index; |
| 554 | hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); |
| 555 | rev = &kvm->arch.hpt.rev[index]; |
| 556 | preempt_disable(); |
| 557 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
| 558 | cpu_relax(); |
| 559 | hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; |
| 560 | hpte[1] = be64_to_cpu(hptep[1]); |
| 561 | hpte[2] = r = rev->guest_rpte; |
| 562 | unlock_hpte(hptep, hpte[0]); |
| 563 | preempt_enable(); |
| 564 | |
| 565 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 566 | hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]); |
| 567 | hpte[1] = hpte_new_to_old_r(hpte[1]); |
| 568 | } |
| 569 | if (hpte[0] != vcpu->arch.pgfault_hpte[0] || |
| 570 | hpte[1] != vcpu->arch.pgfault_hpte[1]) |
| 571 | return RESUME_GUEST; |
| 572 | |
| 573 | /* Translate the logical address and get the page */ |
| 574 | psize = kvmppc_actual_pgsz(hpte[0], r); |
| 575 | gpa_base = r & HPTE_R_RPN & ~(psize - 1); |
| 576 | gfn_base = gpa_base >> PAGE_SHIFT; |
| 577 | gpa = gpa_base | (ea & (psize - 1)); |
| 578 | gfn = gpa >> PAGE_SHIFT; |
| 579 | memslot = gfn_to_memslot(kvm, gfn); |
| 580 | |
| 581 | trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); |
| 582 | |
| 583 | /* No memslot means it's an emulated MMIO region */ |
| 584 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) |
| 585 | return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, |
| 586 | dsisr & DSISR_ISSTORE); |
| 587 | |
| 588 | /* |
| 589 | * This should never happen, because of the slot_is_aligned() |
| 590 | * check in kvmppc_do_h_enter(). |
| 591 | */ |
| 592 | if (gfn_base < memslot->base_gfn) |
| 593 | return -EFAULT; |
| 594 | |
| 595 | /* used to check for invalidations in progress */ |
| 596 | mmu_seq = kvm->mmu_invalidate_seq; |
| 597 | smp_rmb(); |
| 598 | |
| 599 | ret = -EFAULT; |
| 600 | page = NULL; |
| 601 | writing = (dsisr & DSISR_ISSTORE) != 0; |
| 602 | /* If writing != 0, the HPTE must already allow writing for us to get here */
| 603 | write_ok = writing; |
| 604 | hva = gfn_to_hva_memslot(memslot, gfn); |
| 605 | |
| 606 | /* |
| 607 | * Do a fast check first, since __gfn_to_pfn_memslot doesn't |
| 608 | * do it with !atomic && !async, which is how we call it. |
| 609 | * We always ask for write permission since the common case |
| 610 | * is that the page is writable. |
| 611 | */ |
| 612 | if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) { |
| 613 | write_ok = true; |
| 614 | } else { |
| 615 | /* Call KVM generic code to do the slow-path check */ |
| 616 | pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, |
| 617 | writing, &write_ok, NULL); |
| 618 | if (is_error_noslot_pfn(pfn)) |
| 619 | return -EFAULT; |
| 620 | page = NULL; |
| 621 | if (pfn_valid(pfn)) { |
| 622 | page = pfn_to_page(pfn); |
| 623 | if (PageReserved(page)) |
| 624 | page = NULL; |
| 625 | } |
| 626 | } |
| 627 | |
| 628 | /* |
| 629 | * Read the PTE from the process' radix tree and use that |
| 630 | * so we get the shift and attribute bits. |
| 631 | */ |
| 632 | spin_lock(&kvm->mmu_lock); |
| 633 | ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); |
| 634 | pte = __pte(0); |
| 635 | if (ptep) |
| 636 | pte = READ_ONCE(*ptep); |
| 637 | spin_unlock(&kvm->mmu_lock); |
| 638 | /* |
| 639 | * If the PTE disappeared temporarily due to a THP |
| 640 | * collapse, just return and let the guest try again. |
| 641 | */ |
| 642 | if (!pte_present(pte)) { |
| 643 | if (page) |
| 644 | put_page(page); |
| 645 | return RESUME_GUEST; |
| 646 | } |
| 647 | hpa = pte_pfn(pte) << PAGE_SHIFT; |
| 648 | pte_size = PAGE_SIZE; |
| 649 | if (shift) |
| 650 | pte_size = 1ul << shift; |
| 651 | is_ci = pte_ci(pte); |
| 652 | |
| 653 | if (psize > pte_size) |
| 654 | goto out_put; |
| 655 | if (pte_size > psize) |
| 656 | hpa |= hva & (pte_size - psize); |
| 657 | |
| 658 | /* Check WIMG vs. the actual page we're accessing */ |
| 659 | if (!hpte_cache_flags_ok(r, is_ci)) { |
| 660 | if (is_ci) |
| 661 | goto out_put; |
| 662 | /* |
| 663 | * Allow guest to map emulated device memory as |
| 664 | * uncacheable, but actually make it cacheable. |
| 665 | */ |
| 666 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; |
| 667 | } |
| 668 | |
| 669 | /* |
| 670 | * Set the HPTE to point to hpa. |
| 671 | * Since the hpa is at PAGE_SIZE granularity, make sure we |
| 672 | * don't mask out lower-order bits if psize < PAGE_SIZE. |
| 673 | */ |
| 674 | if (psize < PAGE_SIZE) |
| 675 | psize = PAGE_SIZE; |
| 676 | r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa; |
| 677 | if (hpte_is_writable(r) && !write_ok) |
| 678 | r = hpte_make_readonly(r); |
| 679 | ret = RESUME_GUEST; |
| 680 | preempt_disable(); |
| 681 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
| 682 | cpu_relax(); |
| 683 | hnow_v = be64_to_cpu(hptep[0]); |
| 684 | hnow_r = be64_to_cpu(hptep[1]); |
| 685 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 686 | hnow_v = hpte_new_to_old_v(hnow_v, hnow_r); |
| 687 | hnow_r = hpte_new_to_old_r(hnow_r); |
| 688 | } |
| 689 | |
| 690 | /* |
| 691 | * If the HPT is being resized, don't update the HPTE, |
| 692 | * instead let the guest retry after the resize operation is complete. |
| 693 | * The synchronization for mmu_ready test vs. set is provided |
| 694 | * by the HPTE lock. |
| 695 | */ |
| 696 | if (!kvm->arch.mmu_ready) |
| 697 | goto out_unlock; |
| 698 | |
| 699 | if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] || |
| 700 | rev->guest_rpte != hpte[2]) |
| 701 | /* HPTE has been changed under us; let the guest retry */ |
| 702 | goto out_unlock; |
| 703 | hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; |
| 704 | |
| 705 | /* Always put the HPTE in the rmap chain for the page base address */ |
| 706 | rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; |
| 707 | lock_rmap(rmap); |
| 708 | |
| 709 | /* Check if we might have been invalidated; let the guest retry if so */ |
| 710 | ret = RESUME_GUEST; |
| 711 | if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) { |
| 712 | unlock_rmap(rmap); |
| 713 | goto out_unlock; |
| 714 | } |
| 715 | |
| 716 | /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */ |
| 717 | rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; |
| 718 | r &= rcbits | ~(HPTE_R_R | HPTE_R_C); |
| 719 | |
| 720 | if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) { |
| 721 | /* HPTE was previously valid, so we need to invalidate it */ |
| 722 | unlock_rmap(rmap); |
| 723 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
| 724 | kvmppc_invalidate_hpte(kvm, hptep, index); |
| 725 | /* don't lose previous R and C bits */ |
| 726 | r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); |
| 727 | } else { |
| 728 | kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); |
| 729 | } |
| 730 | |
| 731 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 732 | r = hpte_old_to_new_r(hpte[0], r); |
| 733 | hpte[0] = hpte_old_to_new_v(hpte[0]); |
| 734 | } |
| 735 | hptep[1] = cpu_to_be64(r); |
| 736 | eieio(); |
| 737 | __unlock_hpte(hptep, hpte[0]); |
| 738 | asm volatile("ptesync" : : : "memory"); |
| 739 | preempt_enable(); |
| 740 | if (page && hpte_is_writable(r)) |
| 741 | set_page_dirty_lock(page); |
| 742 | |
| 743 | out_put: |
| 744 | trace_kvm_page_fault_exit(vcpu, hpte, ret); |
| 745 | |
| 746 | if (page) |
| 747 | put_page(page); |
| 748 | return ret; |
| 749 | |
| 750 | out_unlock: |
| 751 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
| 752 | preempt_enable(); |
| 753 | goto out_put; |
| 754 | } |
| 755 | |
| 756 | void kvmppc_rmap_reset(struct kvm *kvm) |
| 757 | { |
| 758 | struct kvm_memslots *slots; |
| 759 | struct kvm_memory_slot *memslot; |
| 760 | int srcu_idx, bkt; |
| 761 | |
| 762 | srcu_idx = srcu_read_lock(&kvm->srcu); |
| 763 | slots = kvm_memslots(kvm); |
| 764 | kvm_for_each_memslot(memslot, bkt, slots) { |
| 765 | /* Mutual exclusion with kvm_unmap_hva_range etc. */ |
| 766 | spin_lock(&kvm->mmu_lock); |
| 767 | /* |
| 768 | * This assumes it is acceptable to lose reference and |
| 769 | * change bits across a reset. |
| 770 | */ |
| 771 | memset(memslot->arch.rmap, 0, |
| 772 | memslot->npages * sizeof(*memslot->arch.rmap)); |
| 773 | spin_unlock(&kvm->mmu_lock); |
| 774 | } |
| 775 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 776 | } |
| 777 | |
| 778 | /* Must be called with both HPTE and rmap locked */ |
| 779 | static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, |
| 780 | struct kvm_memory_slot *memslot, |
| 781 | unsigned long *rmapp, unsigned long gfn) |
| 782 | { |
| 783 | __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
| 784 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
| 785 | unsigned long j, h; |
| 786 | unsigned long ptel, psize, rcbits; |
| 787 | |
| 788 | j = rev[i].forw; |
| 789 | if (j == i) { |
| 790 | /* chain is now empty */ |
| 791 | *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); |
| 792 | } else { |
| 793 | /* remove i from chain */ |
| 794 | h = rev[i].back; |
| 795 | rev[h].forw = j; |
| 796 | rev[j].back = h; |
| 797 | rev[i].forw = rev[i].back = i; |
| 798 | *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j; |
| 799 | } |
| 800 | |
| 801 | /* Now check and modify the HPTE */ |
| 802 | ptel = rev[i].guest_rpte; |
| 803 | psize = kvmppc_actual_pgsz(be64_to_cpu(hptep[0]), ptel); |
| 804 | if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && |
| 805 | hpte_rpn(ptel, psize) == gfn) { |
| 806 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
| 807 | kvmppc_invalidate_hpte(kvm, hptep, i); |
| 808 | hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO); |
| 809 | /* Harvest R and C */ |
| 810 | rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); |
| 811 | *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; |
| 812 | if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap) |
| 813 | kvmppc_update_dirty_map(memslot, gfn, psize); |
| 814 | if (rcbits & ~rev[i].guest_rpte) { |
| 815 | rev[i].guest_rpte = ptel | rcbits; |
| 816 | note_hpte_modification(kvm, &rev[i]); |
| 817 | } |
| 818 | } |
| 819 | } |
| 820 | |
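|  | /*
|  | * Invalidate every HPTE on the rmap chain for this gfn, retrying
|  | * whenever the HPTE lock cannot be taken without risking an ABBA
|  | * deadlock against the rmap chain lock.
|  | */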
| 821 | static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 822 | unsigned long gfn) |
| 823 | { |
| 824 | unsigned long i; |
| 825 | __be64 *hptep; |
| 826 | unsigned long *rmapp; |
| 827 | |
| 828 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
| 829 | for (;;) { |
| 830 | lock_rmap(rmapp); |
| 831 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
| 832 | unlock_rmap(rmapp); |
| 833 | break; |
| 834 | } |
| 835 | |
| 836 | /* |
| 837 | * To avoid an ABBA deadlock with the HPTE lock bit, |
| 838 | * we can't spin on the HPTE lock while holding the |
| 839 | * rmap chain lock. |
| 840 | */ |
| 841 | i = *rmapp & KVMPPC_RMAP_INDEX; |
| 842 | hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
| 843 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
| 844 | /* unlock rmap before spinning on the HPTE lock */ |
| 845 | unlock_rmap(rmapp); |
| 846 | while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) |
| 847 | cpu_relax(); |
| 848 | continue; |
| 849 | } |
| 850 | |
| 851 | kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn); |
| 852 | unlock_rmap(rmapp); |
| 853 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
| 854 | } |
| 855 | } |
| 856 | |
| 857 | bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range) |
| 858 | { |
| 859 | gfn_t gfn; |
| 860 | |
| 861 | if (kvm_is_radix(kvm)) { |
| 862 | for (gfn = range->start; gfn < range->end; gfn++) |
| 863 | kvm_unmap_radix(kvm, range->slot, gfn); |
| 864 | } else { |
| 865 | for (gfn = range->start; gfn < range->end; gfn++) |
| 866 | kvm_unmap_rmapp(kvm, range->slot, gfn); |
| 867 | } |
| 868 | |
| 869 | return false; |
| 870 | } |
| 871 | |
| 872 | void kvmppc_core_flush_memslot_hv(struct kvm *kvm, |
| 873 | struct kvm_memory_slot *memslot) |
| 874 | { |
| 875 | unsigned long gfn; |
| 876 | unsigned long n; |
| 877 | unsigned long *rmapp; |
| 878 | |
| 879 | gfn = memslot->base_gfn; |
| 880 | rmapp = memslot->arch.rmap; |
| 881 | if (kvm_is_radix(kvm)) { |
| 882 | kvmppc_radix_flush_memslot(kvm, memslot); |
| 883 | return; |
| 884 | } |
| 885 | |
| 886 | for (n = memslot->npages; n; --n, ++gfn) { |
| 887 | /* |
| 888 | * Testing the present bit without locking is OK because |
| 889 | * the memslot has been marked invalid already, and hence |
| 890 | * no new HPTEs referencing this page can be created, |
| 891 | * thus the present bit can't go from 0 to 1. |
| 892 | */ |
| 893 | if (*rmapp & KVMPPC_RMAP_PRESENT) |
| 894 | kvm_unmap_rmapp(kvm, memslot, gfn); |
| 895 | ++rmapp; |
| 896 | } |
| 897 | } |
| 898 | |
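|  | /*
|  | * Test and clear the referenced (R) bit for all HPTEs mapping this
|  | * gfn, returning true if any of them (or the rmap entry itself) had
|  | * been referenced.
|  | */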
| 899 | static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 900 | unsigned long gfn) |
| 901 | { |
| 902 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
| 903 | unsigned long head, i, j; |
| 904 | __be64 *hptep; |
| 905 | bool ret = false; |
| 906 | unsigned long *rmapp; |
| 907 | |
| 908 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
| 909 | retry: |
| 910 | lock_rmap(rmapp); |
| 911 | if (*rmapp & KVMPPC_RMAP_REFERENCED) { |
| 912 | *rmapp &= ~KVMPPC_RMAP_REFERENCED; |
| 913 | ret = true; |
| 914 | } |
| 915 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
| 916 | unlock_rmap(rmapp); |
| 917 | return ret; |
| 918 | } |
| 919 | |
| 920 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
| 921 | do { |
| 922 | hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
| 923 | j = rev[i].forw; |
| 924 | |
| 925 | /* If this HPTE isn't referenced, ignore it */ |
| 926 | if (!(be64_to_cpu(hptep[1]) & HPTE_R_R)) |
| 927 | continue; |
| 928 | |
| 929 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
| 930 | /* unlock rmap before spinning on the HPTE lock */ |
| 931 | unlock_rmap(rmapp); |
| 932 | while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) |
| 933 | cpu_relax(); |
| 934 | goto retry; |
| 935 | } |
| 936 | |
| 937 | /* Now check and modify the HPTE */ |
| 938 | if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && |
| 939 | (be64_to_cpu(hptep[1]) & HPTE_R_R)) { |
| 940 | kvmppc_clear_ref_hpte(kvm, hptep, i); |
| 941 | if (!(rev[i].guest_rpte & HPTE_R_R)) { |
| 942 | rev[i].guest_rpte |= HPTE_R_R; |
| 943 | note_hpte_modification(kvm, &rev[i]); |
| 944 | } |
| 945 | ret = true; |
| 946 | } |
| 947 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
| 948 | } while ((i = j) != head); |
| 949 | |
| 950 | unlock_rmap(rmapp); |
| 951 | return ret; |
| 952 | } |
| 953 | |
| 954 | bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) |
| 955 | { |
| 956 | gfn_t gfn; |
| 957 | bool ret = false; |
| 958 | |
| 959 | if (kvm_is_radix(kvm)) { |
| 960 | for (gfn = range->start; gfn < range->end; gfn++) |
| 961 | ret |= kvm_age_radix(kvm, range->slot, gfn); |
| 962 | } else { |
| 963 | for (gfn = range->start; gfn < range->end; gfn++) |
| 964 | ret |= kvm_age_rmapp(kvm, range->slot, gfn); |
| 965 | } |
| 966 | |
| 967 | return ret; |
| 968 | } |
| 969 | |
| 970 | static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 971 | unsigned long gfn) |
| 972 | { |
| 973 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
| 974 | unsigned long head, i, j; |
| 975 | unsigned long *hp; |
| 976 | bool ret = true; |
| 977 | unsigned long *rmapp; |
| 978 | |
| 979 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
| 980 | if (*rmapp & KVMPPC_RMAP_REFERENCED) |
| 981 | return true; |
| 982 | |
| 983 | lock_rmap(rmapp); |
| 984 | if (*rmapp & KVMPPC_RMAP_REFERENCED) |
| 985 | goto out; |
| 986 | |
| 987 | if (*rmapp & KVMPPC_RMAP_PRESENT) { |
| 988 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
| 989 | do { |
| 990 | hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); |
| 991 | j = rev[i].forw; |
| 992 | if (be64_to_cpu(hp[1]) & HPTE_R_R) |
| 993 | goto out; |
| 994 | } while ((i = j) != head); |
| 995 | } |
| 996 | ret = false; |
| 997 | |
| 998 | out: |
| 999 | unlock_rmap(rmapp); |
| 1000 | return ret; |
| 1001 | } |
| 1002 | |
| 1003 | bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) |
| 1004 | { |
| 1005 | WARN_ON(range->start + 1 != range->end); |
| 1006 | |
| 1007 | if (kvm_is_radix(kvm)) |
| 1008 | return kvm_test_age_radix(kvm, range->slot, range->start); |
| 1009 | else |
| 1010 | return kvm_test_age_rmapp(kvm, range->slot, range->start); |
| 1011 | } |
| 1012 | |
| 1013 | static int vcpus_running(struct kvm *kvm) |
| 1014 | { |
| 1015 | return atomic_read(&kvm->arch.vcpus_running) != 0; |
| 1016 | } |
| 1017 | |
| 1018 | /* |
| 1019 | * Returns the number of system pages that are dirty. |
| 1020 | * This can be more than 1 if we find a huge-page HPTE. |
| 1021 | */ |
| 1022 | static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) |
| 1023 | { |
| 1024 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
| 1025 | unsigned long head, i, j; |
| 1026 | unsigned long n; |
| 1027 | unsigned long v, r; |
| 1028 | __be64 *hptep; |
| 1029 | int npages_dirty = 0; |
| 1030 | |
| 1031 | retry: |
| 1032 | lock_rmap(rmapp); |
| 1033 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
| 1034 | unlock_rmap(rmapp); |
| 1035 | return npages_dirty; |
| 1036 | } |
| 1037 | |
| 1038 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
| 1039 | do { |
| 1040 | unsigned long hptep1; |
| 1041 | hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
| 1042 | j = rev[i].forw; |
| 1043 | |
| 1044 | /* |
| 1045 | * Checking the C (changed) bit here is racy since there |
| 1046 | * is no guarantee about when the hardware writes it back. |
| 1047 | * If the HPTE is not writable then it is stable since the |
| 1048 | * page can't be written to, and we would have done a tlbie |
| 1049 | * (which forces the hardware to complete any writeback) |
| 1050 | * when making the HPTE read-only. |
| 1051 | * If vcpus are running then this call is racy anyway |
| 1052 | * since the page could get dirtied subsequently, so we |
| 1053 | * expect there to be a further call which would pick up |
| 1054 | * any delayed C bit writeback. |
| 1055 | * Otherwise we need to do the tlbie even if C==0 in |
| 1056 | * order to pick up any delayed writeback of C. |
| 1057 | */ |
| 1058 | hptep1 = be64_to_cpu(hptep[1]); |
| 1059 | if (!(hptep1 & HPTE_R_C) && |
| 1060 | (!hpte_is_writable(hptep1) || vcpus_running(kvm))) |
| 1061 | continue; |
| 1062 | |
| 1063 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
| 1064 | /* unlock rmap before spinning on the HPTE lock */ |
| 1065 | unlock_rmap(rmapp); |
| 1066 | while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK)) |
| 1067 | cpu_relax(); |
| 1068 | goto retry; |
| 1069 | } |
| 1070 | |
| 1071 | /* Now check and modify the HPTE */ |
| 1072 | if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) { |
| 1073 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
| 1074 | continue; |
| 1075 | } |
| 1076 | |
| 1077 | /* need to make it temporarily absent so C is stable */ |
| 1078 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
| 1079 | kvmppc_invalidate_hpte(kvm, hptep, i); |
| 1080 | v = be64_to_cpu(hptep[0]); |
| 1081 | r = be64_to_cpu(hptep[1]); |
| 1082 | if (r & HPTE_R_C) { |
| 1083 | hptep[1] = cpu_to_be64(r & ~HPTE_R_C); |
| 1084 | if (!(rev[i].guest_rpte & HPTE_R_C)) { |
| 1085 | rev[i].guest_rpte |= HPTE_R_C; |
| 1086 | note_hpte_modification(kvm, &rev[i]); |
| 1087 | } |
| 1088 | n = kvmppc_actual_pgsz(v, r); |
| 1089 | n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1090 | if (n > npages_dirty) |
| 1091 | npages_dirty = n; |
| 1092 | eieio(); |
| 1093 | } |
| 1094 | v &= ~HPTE_V_ABSENT; |
| 1095 | v |= HPTE_V_VALID; |
| 1096 | __unlock_hpte(hptep, v); |
| 1097 | } while ((i = j) != head); |
| 1098 | |
| 1099 | unlock_rmap(rmapp); |
| 1100 | return npages_dirty; |
| 1101 | } |
| 1102 | |
| 1103 | void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, |
| 1104 | struct kvm_memory_slot *memslot, |
| 1105 | unsigned long *map) |
| 1106 | { |
| 1107 | unsigned long gfn; |
| 1108 | |
| 1109 | if (!vpa->dirty || !vpa->pinned_addr) |
| 1110 | return; |
| 1111 | gfn = vpa->gpa >> PAGE_SHIFT; |
| 1112 | if (gfn < memslot->base_gfn || |
| 1113 | gfn >= memslot->base_gfn + memslot->npages) |
| 1114 | return; |
| 1115 | |
| 1116 | vpa->dirty = false; |
| 1117 | if (map) |
| 1118 | __set_bit_le(gfn - memslot->base_gfn, map); |
| 1119 | } |
| 1120 | |
| 1121 | long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, |
| 1122 | struct kvm_memory_slot *memslot, unsigned long *map) |
| 1123 | { |
| 1124 | unsigned long i; |
| 1125 | unsigned long *rmapp; |
| 1126 | |
| 1127 | preempt_disable(); |
| 1128 | rmapp = memslot->arch.rmap; |
| 1129 | for (i = 0; i < memslot->npages; ++i) { |
| 1130 | int npages = kvm_test_clear_dirty_npages(kvm, rmapp); |
| 1131 | /* |
| 1132 | * Note that if npages > 0 then i must be a multiple of npages, |
| 1133 | * since we always put huge-page HPTEs in the rmap chain |
| 1134 | * corresponding to their page base address. |
| 1135 | */ |
| 1136 | if (npages) |
| 1137 | set_dirty_bits(map, i, npages); |
| 1138 | ++rmapp; |
| 1139 | } |
| 1140 | preempt_enable(); |
| 1141 | return 0; |
| 1142 | } |
| 1143 | |
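|  | /*
|  | * Pin the host page backing guest physical address gpa and return a
|  | * kernel pointer to it; *nb_ret gets the number of bytes usable
|  | * before the end of the page. Undone by kvmppc_unpin_guest_page().
|  | */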
| 1144 | void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, |
| 1145 | unsigned long *nb_ret) |
| 1146 | { |
| 1147 | struct kvm_memory_slot *memslot; |
| 1148 | unsigned long gfn = gpa >> PAGE_SHIFT; |
| 1149 | struct page *page, *pages[1]; |
| 1150 | int npages; |
| 1151 | unsigned long hva, offset; |
| 1152 | int srcu_idx; |
| 1153 | |
| 1154 | srcu_idx = srcu_read_lock(&kvm->srcu); |
| 1155 | memslot = gfn_to_memslot(kvm, gfn); |
| 1156 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) |
| 1157 | goto err; |
| 1158 | hva = gfn_to_hva_memslot(memslot, gfn); |
| 1159 | npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages); |
| 1160 | if (npages < 1) |
| 1161 | goto err; |
| 1162 | page = pages[0]; |
| 1163 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 1164 | |
| 1165 | offset = gpa & (PAGE_SIZE - 1); |
| 1166 | if (nb_ret) |
| 1167 | *nb_ret = PAGE_SIZE - offset; |
| 1168 | return page_address(page) + offset; |
| 1169 | |
| 1170 | err: |
| 1171 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 1172 | return NULL; |
| 1173 | } |
| 1174 | |
| 1175 | void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa, |
| 1176 | bool dirty) |
| 1177 | { |
| 1178 | struct page *page = virt_to_page(va); |
| 1179 | struct kvm_memory_slot *memslot; |
| 1180 | unsigned long gfn; |
| 1181 | int srcu_idx; |
| 1182 | |
| 1183 | put_page(page); |
| 1184 | |
| 1185 | if (!dirty) |
| 1186 | return; |
| 1187 | |
| 1188 | /* We need to mark this page dirty in the memslot dirty_bitmap, if any */ |
| 1189 | gfn = gpa >> PAGE_SHIFT; |
| 1190 | srcu_idx = srcu_read_lock(&kvm->srcu); |
| 1191 | memslot = gfn_to_memslot(kvm, gfn); |
| 1192 | if (memslot && memslot->dirty_bitmap) |
| 1193 | set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap); |
| 1194 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 1195 | } |
| 1196 | |
| 1197 | /* |
| 1198 | * HPT resizing |
| 1199 | */ |
| 1200 | static int resize_hpt_allocate(struct kvm_resize_hpt *resize) |
| 1201 | { |
| 1202 | int rc; |
| 1203 | |
| 1204 | rc = kvmppc_allocate_hpt(&resize->hpt, resize->order); |
| 1205 | if (rc < 0) |
| 1206 | return rc; |
| 1207 | |
| 1208 | resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__, |
| 1209 | resize->hpt.virt); |
| 1210 | |
| 1211 | return 0; |
| 1212 | } |
| 1213 | |
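|  | /*
|  | * Move one HPTE from the current HPT into the pending (resized) HPT,
|  | * recomputing its group index for the new hash mask. Only bolted
|  | * entries are copied; everything else is unmapped and left for the
|  | * guest to fault back in.
|  | */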
| 1214 | static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, |
| 1215 | unsigned long idx) |
| 1216 | { |
| 1217 | struct kvm *kvm = resize->kvm; |
| 1218 | struct kvm_hpt_info *old = &kvm->arch.hpt; |
| 1219 | struct kvm_hpt_info *new = &resize->hpt; |
| 1220 | unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1; |
| 1221 | unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1; |
| 1222 | __be64 *hptep, *new_hptep; |
| 1223 | unsigned long vpte, rpte, guest_rpte; |
| 1224 | int ret; |
| 1225 | struct revmap_entry *rev; |
| 1226 | unsigned long apsize, avpn, pteg, hash; |
| 1227 | unsigned long new_idx, new_pteg, replace_vpte; |
| 1228 | int pshift; |
| 1229 | |
| 1230 | hptep = (__be64 *)(old->virt + (idx << 4)); |
| 1231 | |
| 1232 | /* Guest is stopped, so new HPTEs can't be added or faulted |
| 1233 | * in, only unmapped or altered by host actions. So, it's |
| 1234 | * safe to check this before we take the HPTE lock */ |
| 1235 | vpte = be64_to_cpu(hptep[0]); |
| 1236 | if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT)) |
| 1237 | return 0; /* nothing to do */ |
| 1238 | |
| 1239 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
| 1240 | cpu_relax(); |
| 1241 | |
| 1242 | vpte = be64_to_cpu(hptep[0]); |
| 1243 | |
| 1244 | ret = 0; |
| 1245 | if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT)) |
| 1246 | /* Nothing to do */ |
| 1247 | goto out; |
| 1248 | |
| 1249 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 1250 | rpte = be64_to_cpu(hptep[1]); |
| 1251 | vpte = hpte_new_to_old_v(vpte, rpte); |
| 1252 | } |
| 1253 | |
| 1254 | /* Unmap */ |
| 1255 | rev = &old->rev[idx]; |
| 1256 | guest_rpte = rev->guest_rpte; |
| 1257 | |
| 1258 | ret = -EIO; |
| 1259 | apsize = kvmppc_actual_pgsz(vpte, guest_rpte); |
| 1260 | if (!apsize) |
| 1261 | goto out; |
| 1262 | |
| 1263 | if (vpte & HPTE_V_VALID) { |
| 1264 | unsigned long gfn = hpte_rpn(guest_rpte, apsize); |
| 1265 | int srcu_idx = srcu_read_lock(&kvm->srcu); |
| 1266 | struct kvm_memory_slot *memslot = |
| 1267 | __gfn_to_memslot(kvm_memslots(kvm), gfn); |
| 1268 | |
| 1269 | if (memslot) { |
| 1270 | unsigned long *rmapp; |
| 1271 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
| 1272 | |
| 1273 | lock_rmap(rmapp); |
| 1274 | kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn); |
| 1275 | unlock_rmap(rmapp); |
| 1276 | } |
| 1277 | |
| 1278 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 1279 | } |
| 1280 | |
| 1281 | /* Reload PTE after unmap */ |
| 1282 | vpte = be64_to_cpu(hptep[0]); |
| 1283 | BUG_ON(vpte & HPTE_V_VALID); |
| 1284 | BUG_ON(!(vpte & HPTE_V_ABSENT)); |
| 1285 | |
| 1286 | ret = 0; |
| 1287 | if (!(vpte & HPTE_V_BOLTED)) |
| 1288 | goto out; |
| 1289 | |
| 1290 | rpte = be64_to_cpu(hptep[1]); |
| 1291 | |
| 1292 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 1293 | vpte = hpte_new_to_old_v(vpte, rpte); |
| 1294 | rpte = hpte_new_to_old_r(rpte); |
| 1295 | } |
| 1296 | |
| 1297 | pshift = kvmppc_hpte_base_page_shift(vpte, rpte); |
| 1298 | avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23); |
| 1299 | pteg = idx / HPTES_PER_GROUP; |
| 1300 | if (vpte & HPTE_V_SECONDARY) |
| 1301 | pteg = ~pteg; |
| 1302 | |
| 1303 | if (!(vpte & HPTE_V_1TB_SEG)) { |
| 1304 | unsigned long offset, vsid; |
| 1305 | |
| 1306 | /* We only have 28 - 23 bits of offset in avpn */ |
| 1307 | offset = (avpn & 0x1f) << 23; |
| 1308 | vsid = avpn >> 5; |
| 1309 | /* We can find more bits from the pteg value */ |
| 1310 | if (pshift < 23) |
| 1311 | offset |= ((vsid ^ pteg) & old_hash_mask) << pshift; |
| 1312 | |
| 1313 | hash = vsid ^ (offset >> pshift); |
| 1314 | } else { |
| 1315 | unsigned long offset, vsid; |
| 1316 | |
| 1317 | /* We only have 40 - 23 bits of seg_off in avpn */ |
| 1318 | offset = (avpn & 0x1ffff) << 23; |
| 1319 | vsid = avpn >> 17; |
| 1320 | if (pshift < 23) |
| 1321 | offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift; |
| 1322 | |
| 1323 | hash = vsid ^ (vsid << 25) ^ (offset >> pshift); |
| 1324 | } |
| 1325 | |
| 1326 | new_pteg = hash & new_hash_mask; |
| 1327 | if (vpte & HPTE_V_SECONDARY) |
| 1328 | new_pteg = ~hash & new_hash_mask; |
| 1329 | |
| 1330 | new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP); |
| 1331 | new_hptep = (__be64 *)(new->virt + (new_idx << 4)); |
| 1332 | |
| 1333 | replace_vpte = be64_to_cpu(new_hptep[0]); |
| 1334 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 1335 | unsigned long replace_rpte = be64_to_cpu(new_hptep[1]); |
| 1336 | replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte); |
| 1337 | } |
| 1338 | |
| 1339 | if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
| 1340 | BUG_ON(new->order >= old->order); |
| 1341 | |
| 1342 | if (replace_vpte & HPTE_V_BOLTED) { |
| 1343 | if (vpte & HPTE_V_BOLTED) |
| 1344 | /* Bolted collision, nothing we can do */ |
| 1345 | ret = -ENOSPC; |
| 1346 | /* Discard the new HPTE */ |
| 1347 | goto out; |
| 1348 | } |
| 1349 | |
| 1350 | /* Discard the previous HPTE */ |
| 1351 | } |
| 1352 | |
| 1353 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 1354 | rpte = hpte_old_to_new_r(vpte, rpte); |
| 1355 | vpte = hpte_old_to_new_v(vpte); |
| 1356 | } |
| 1357 | |
| 1358 | new_hptep[1] = cpu_to_be64(rpte); |
| 1359 | new->rev[new_idx].guest_rpte = guest_rpte; |
| 1360 | /* No need for a barrier, since new HPT isn't active */ |
| 1361 | new_hptep[0] = cpu_to_be64(vpte); |
| 1362 | unlock_hpte(new_hptep, vpte); |
| 1363 | |
| 1364 | out: |
| 1365 | unlock_hpte(hptep, vpte); |
| 1366 | return ret; |
| 1367 | } |
| 1368 | |
| 1369 | static int resize_hpt_rehash(struct kvm_resize_hpt *resize) |
| 1370 | { |
| 1371 | struct kvm *kvm = resize->kvm; |
| 1372 | unsigned long i; |
| 1373 | int rc; |
| 1374 | |
| 1375 | for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { |
| 1376 | rc = resize_hpt_rehash_hpte(resize, i); |
| 1377 | if (rc != 0) |
| 1378 | return rc; |
| 1379 | } |
| 1380 | |
| 1381 | return 0; |
| 1382 | } |
| 1383 | |
| 1384 | static void resize_hpt_pivot(struct kvm_resize_hpt *resize) |
| 1385 | { |
| 1386 | struct kvm *kvm = resize->kvm; |
| 1387 | struct kvm_hpt_info hpt_tmp; |
| 1388 | |
| 1389 | /* Exchange the pending tables in the resize structure with |
| 1390 | * the active tables */ |
| 1391 | |
| 1392 | resize_hpt_debug(resize, "resize_hpt_pivot()\n"); |
| 1393 | |
| 1394 | spin_lock(&kvm->mmu_lock); |
| 1395 | asm volatile("ptesync" : : : "memory"); |
| 1396 | |
| 1397 | hpt_tmp = kvm->arch.hpt; |
| 1398 | kvmppc_set_hpt(kvm, &resize->hpt); |
| 1399 | resize->hpt = hpt_tmp; |
| 1400 | |
| 1401 | spin_unlock(&kvm->mmu_lock); |
| 1402 | |
| 1403 | synchronize_srcu_expedited(&kvm->srcu); |
| 1404 | |
| 1405 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
| 1406 | kvmppc_setup_partition_table(kvm); |
| 1407 | |
| 1408 | resize_hpt_debug(resize, "resize_hpt_pivot() done\n"); |
| 1409 | } |
| 1410 | |
| 1411 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) |
| 1412 | { |
| 1413 | if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock))) |
| 1414 | return; |
| 1415 | |
| 1416 | if (!resize) |
| 1417 | return; |
| 1418 | |
| 1419 | if (resize->error != -EBUSY) { |
| 1420 | if (resize->hpt.virt) |
| 1421 | kvmppc_free_hpt(&resize->hpt); |
| 1422 | kfree(resize); |
| 1423 | } |
| 1424 | |
| 1425 | if (kvm->arch.resize_hpt == resize) |
| 1426 | kvm->arch.resize_hpt = NULL; |
| 1427 | } |
| 1428 | |
| 1429 | static void resize_hpt_prepare_work(struct work_struct *work) |
| 1430 | { |
| 1431 | struct kvm_resize_hpt *resize = container_of(work, |
| 1432 | struct kvm_resize_hpt, |
| 1433 | work); |
| 1434 | struct kvm *kvm = resize->kvm; |
| 1435 | int err = 0; |
| 1436 | |
| 1437 | if (WARN_ON(resize->error != -EBUSY)) |
| 1438 | return; |
| 1439 | |
| 1440 | mutex_lock(&kvm->arch.mmu_setup_lock); |
| 1441 | |
| 1442 | /* Request is still current? */ |
| 1443 | if (kvm->arch.resize_hpt == resize) { |
| 1444 | /* We may request large allocations here, which can take a while:
| 1445 | * do not sleep that long with kvm->arch.mmu_setup_lock held.
| 1446 | */ |
| 1447 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
| 1448 | |
| 1449 | resize_hpt_debug(resize, "%s(): order = %d\n", __func__, |
| 1450 | resize->order); |
| 1451 | |
| 1452 | err = resize_hpt_allocate(resize); |
| 1453 | |
| 1454 | /* -EBUSY is reserved to mean "allocation still in progress",
| 1455 | * so an -EBUSY from the allocator must not leak through here.
| 1456 | */ |
| 1457 | if (WARN_ON(err == -EBUSY)) |
| 1458 | err = -EINPROGRESS; |
| 1459 | |
| 1460 | mutex_lock(&kvm->arch.mmu_setup_lock); |
| 1461 | /* It is possible that kvm->arch.resize_hpt != resize |
| 1462 | * after we grab kvm->arch.mmu_setup_lock again. |
| 1463 | */ |
| 1464 | } |
| 1465 | |
| 1466 | resize->error = err; |
| 1467 | |
| 1468 | if (kvm->arch.resize_hpt != resize) |
| 1469 | resize_hpt_release(kvm, resize); |
| 1470 | |
| 1471 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
| 1472 | } |
| 1473 | |
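|  | /*
|  | * First phase of a resize: start (or query) the background allocation
|  | * of a new HPT of 2^shift bytes. Returns an estimated time in ms
|  | * while the allocation is still in progress, and 0 once it is ready
|  | * for the commit phase.
|  | */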
| 1474 | int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, |
| 1475 | struct kvm_ppc_resize_hpt *rhpt) |
| 1476 | { |
| 1477 | unsigned long flags = rhpt->flags; |
| 1478 | unsigned long shift = rhpt->shift; |
| 1479 | struct kvm_resize_hpt *resize; |
| 1480 | int ret; |
| 1481 | |
| 1482 | if (flags != 0 || kvm_is_radix(kvm)) |
| 1483 | return -EINVAL; |
| 1484 | |
| 1485 | if (shift && ((shift < 18) || (shift > 46))) |
| 1486 | return -EINVAL; |
| 1487 | |
| 1488 | mutex_lock(&kvm->arch.mmu_setup_lock); |
| 1489 | |
| 1490 | resize = kvm->arch.resize_hpt; |
| 1491 | |
| 1492 | if (resize) { |
| 1493 | if (resize->order == shift) { |
| 1494 | /* Suitable resize in progress? */ |
| 1495 | ret = resize->error; |
| 1496 | if (ret == -EBUSY) |
| 1497 | ret = 100; /* estimated time in ms */ |
| 1498 | else if (ret) |
| 1499 | resize_hpt_release(kvm, resize); |
| 1500 | |
| 1501 | goto out; |
| 1502 | } |
| 1503 | |
| 1504 | /* not suitable, cancel it */ |
| 1505 | resize_hpt_release(kvm, resize); |
| 1506 | } |
| 1507 | |
| 1508 | ret = 0; |
| 1509 | if (!shift) |
| 1510 | goto out; /* nothing to do */ |
| 1511 | |
| 1512 | /* start new resize */ |
| 1513 | |
| 1514 | resize = kzalloc(sizeof(*resize), GFP_KERNEL); |
| 1515 | if (!resize) { |
| 1516 | ret = -ENOMEM; |
| 1517 | goto out; |
| 1518 | } |
| 1519 | |
| 1520 | resize->error = -EBUSY; |
| 1521 | resize->order = shift; |
| 1522 | resize->kvm = kvm; |
| 1523 | INIT_WORK(&resize->work, resize_hpt_prepare_work); |
| 1524 | kvm->arch.resize_hpt = resize; |
| 1525 | |
| 1526 | schedule_work(&resize->work); |
| 1527 | |
| 1528 | ret = 100; /* estimated time in ms */ |
| 1529 | |
| 1530 | out: |
| 1531 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
| 1532 | return ret; |
| 1533 | } |
| 1534 | |
| 1535 | static void resize_hpt_boot_vcpu(void *opaque) |
| 1536 | { |
| 1537 | /* Nothing to do, just force a KVM exit */ |
| 1538 | } |
| 1539 | |
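|  | /*
|  | * Second phase of a resize: with all vcpus kicked out of the guest,
|  | * rehash the bolted HPTEs into the prepared table and pivot it into
|  | * place as the active HPT.
|  | */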
| 1540 | int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, |
| 1541 | struct kvm_ppc_resize_hpt *rhpt) |
| 1542 | { |
| 1543 | unsigned long flags = rhpt->flags; |
| 1544 | unsigned long shift = rhpt->shift; |
| 1545 | struct kvm_resize_hpt *resize; |
| 1546 | int ret; |
| 1547 | |
| 1548 | if (flags != 0 || kvm_is_radix(kvm)) |
| 1549 | return -EINVAL; |
| 1550 | |
| 1551 | if (shift && ((shift < 18) || (shift > 46))) |
| 1552 | return -EINVAL; |
| 1553 | |
| 1554 | mutex_lock(&kvm->arch.mmu_setup_lock); |
| 1555 | |
| 1556 | resize = kvm->arch.resize_hpt; |
| 1557 | |
| 1558 | /* This shouldn't be possible */ |
| 1559 | ret = -EIO; |
| 1560 | if (WARN_ON(!kvm->arch.mmu_ready)) |
| 1561 | goto out_no_hpt; |
| 1562 | |
| 1563 | /* Stop VCPUs from running while we mess with the HPT */ |
| 1564 | kvm->arch.mmu_ready = 0; |
| 1565 | smp_mb(); |
| 1566 | |
| 1567 | /* Boot all CPUs out of the guest so they re-read |
| 1568 | * mmu_ready */ |
| 1569 | on_each_cpu(resize_hpt_boot_vcpu, NULL, 1); |
| 1570 | |
| 1571 | ret = -ENXIO; |
| 1572 | if (!resize || (resize->order != shift)) |
| 1573 | goto out; |
| 1574 | |
| 1575 | ret = resize->error; |
| 1576 | if (ret) |
| 1577 | goto out; |
| 1578 | |
| 1579 | ret = resize_hpt_rehash(resize); |
| 1580 | if (ret) |
| 1581 | goto out; |
| 1582 | |
| 1583 | resize_hpt_pivot(resize); |
| 1584 | |
| 1585 | out: |
| 1586 | /* Let VCPUs run again */ |
| 1587 | kvm->arch.mmu_ready = 1; |
| 1588 | smp_mb(); |
| 1589 | out_no_hpt: |
| 1590 | resize_hpt_release(kvm, resize); |
| 1591 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
| 1592 | return ret; |
| 1593 | } |
| 1594 | |
| 1595 | /* |
| 1596 | * Functions for reading and writing the hash table via reads and |
| 1597 | * writes on a file descriptor. |
| 1598 | * |
| 1599 | * Reads return the guest view of the hash table, which has to be |
| 1600 | * pieced together from the real hash table and the guest_rpte |
| 1601 | * values in the revmap array. |
| 1602 | * |
| 1603 | * On writes, each HPTE written is considered in turn, and if it |
| 1604 | * is valid, it is written to the HPT as if an H_ENTER with the |
| 1605 | * exact flag set was done. When the invalid count is non-zero |
| 1606 | * in the header written to the stream, the kernel will make |
| 1607 | * sure that that many HPTEs are invalid, and invalidate them |
| 1608 | * if not. |
| 1609 | */ |
| 1610 | |
| 1611 | struct kvm_htab_ctx { |
| 1612 | unsigned long index; |
| 1613 | unsigned long flags; |
| 1614 | struct kvm *kvm; |
| 1615 | int first_pass; |
| 1616 | }; |
| 1617 | |
| 1618 | #define HPTE_SIZE (2 * sizeof(unsigned long)) |
| 1619 | |
| 1620 | /* |
| 1621 | * Returns 1 if this HPT entry has been modified or has pending |
| 1622 | * R/C bit changes. |
| 1623 | */ |
| 1624 | static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp) |
| 1625 | { |
| 1626 | unsigned long rcbits_unset; |
| 1627 | |
| 1628 | if (revp->guest_rpte & HPTE_GR_MODIFIED) |
| 1629 | return 1; |
| 1630 | |
| 1631 | /* Also need to consider changes in reference and changed bits */ |
| 1632 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); |
| 1633 | if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) && |
| 1634 | (be64_to_cpu(hptp[1]) & rcbits_unset)) |
| 1635 | return 1; |
| 1636 | |
| 1637 | return 0; |
| 1638 | } |
| 1639 | |
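|  | /*
|  | * Encode one HPTE, as the guest sees it, into the two-dword buffer
|  | * used by the KVM_GET_HTAB read stream. Returns 1 if the entry
|  | * should be included under the current header, 0 otherwise.
|  | */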
| 1640 | static long record_hpte(unsigned long flags, __be64 *hptp, |
| 1641 | unsigned long *hpte, struct revmap_entry *revp, |
| 1642 | int want_valid, int first_pass) |
| 1643 | { |
| 1644 | unsigned long v, r, hr; |
| 1645 | unsigned long rcbits_unset; |
| 1646 | int ok = 1; |
| 1647 | int valid, dirty; |
| 1648 | |
| 1649 | /* Unmodified entries are uninteresting except on the first pass */ |
| 1650 | dirty = hpte_dirty(revp, hptp); |
| 1651 | if (!first_pass && !dirty) |
| 1652 | return 0; |
| 1653 | |
| 1654 | valid = 0; |
| 1655 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
| 1656 | valid = 1; |
| 1657 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && |
| 1658 | !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED)) |
| 1659 | valid = 0; |
| 1660 | } |
| 1661 | if (valid != want_valid) |
| 1662 | return 0; |
| 1663 | |
| 1664 | v = r = 0; |
| 1665 | if (valid || dirty) { |
| 1666 | /* lock the HPTE so it's stable and read it */ |
| 1667 | preempt_disable(); |
| 1668 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) |
| 1669 | cpu_relax(); |
| 1670 | v = be64_to_cpu(hptp[0]); |
| 1671 | hr = be64_to_cpu(hptp[1]); |
| 1672 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 1673 | v = hpte_new_to_old_v(v, hr); |
| 1674 | hr = hpte_new_to_old_r(hr); |
| 1675 | } |
| 1676 | |
| 1677 | /* re-evaluate valid and dirty from synchronized HPTE value */ |
| 1678 | valid = !!(v & HPTE_V_VALID); |
| 1679 | dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); |
| 1680 | |
| 1681 | /* Harvest R and C into guest view if necessary */ |
| 1682 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); |
| 1683 | if (valid && (rcbits_unset & hr)) { |
| 1684 | revp->guest_rpte |= (hr & |
| 1685 | (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED; |
| 1686 | dirty = 1; |
| 1687 | } |
| 1688 | |
| 1689 | if (v & HPTE_V_ABSENT) { |
| 1690 | v &= ~HPTE_V_ABSENT; |
| 1691 | v |= HPTE_V_VALID; |
| 1692 | valid = 1; |
| 1693 | } |
| 1694 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED)) |
| 1695 | valid = 0; |
| 1696 | |
| 1697 | r = revp->guest_rpte; |
| 1698 | /* only clear modified if this is the right sort of entry */ |
| 1699 | if (valid == want_valid && dirty) { |
| 1700 | r &= ~HPTE_GR_MODIFIED; |
| 1701 | revp->guest_rpte = r; |
| 1702 | } |
| 1703 | unlock_hpte(hptp, be64_to_cpu(hptp[0])); |
| 1704 | preempt_enable(); |
| 1705 | if (!(valid == want_valid && (first_pass || dirty))) |
| 1706 | ok = 0; |
| 1707 | } |
| 1708 | hpte[0] = cpu_to_be64(v); |
| 1709 | hpte[1] = cpu_to_be64(r); |
| 1710 | return ok; |
| 1711 | } |
| 1712 | |
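/*
* read() handler for the htab file descriptor: fills the user buffer
* with a sequence of kvm_get_htab_header structures, each followed by
* hdr.n_valid HPTE images.  On the first pass every valid entry is
* reported; on later passes only entries that hpte_dirty() flags are,
* so repeated reads give an incremental view of changes (as used for
* live migration).  The call returns the number of bytes produced and
* stops when the buffer is nearly full or the scan wraps past the end
* of the HPT.
*/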
| 1713 | static ssize_t kvm_htab_read(struct file *file, char __user *buf, |
| 1714 | size_t count, loff_t *ppos) |
| 1715 | { |
| 1716 | struct kvm_htab_ctx *ctx = file->private_data; |
| 1717 | struct kvm *kvm = ctx->kvm; |
| 1718 | struct kvm_get_htab_header hdr; |
| 1719 | __be64 *hptp; |
| 1720 | struct revmap_entry *revp; |
| 1721 | unsigned long i, nb, nw; |
| 1722 | unsigned long __user *lbuf; |
| 1723 | struct kvm_get_htab_header __user *hptr; |
| 1724 | unsigned long flags; |
| 1725 | int first_pass; |
| 1726 | unsigned long hpte[2]; |
| 1727 | |
| 1728 | if (!access_ok(buf, count)) |
| 1729 | return -EFAULT; |
| 1730 | if (kvm_is_radix(kvm)) |
| 1731 | return 0; |
| 1732 | |
| 1733 | first_pass = ctx->first_pass; |
| 1734 | flags = ctx->flags; |
| 1735 | |
| 1736 | i = ctx->index; |
| 1737 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
| 1738 | revp = kvm->arch.hpt.rev + i; |
| 1739 | lbuf = (unsigned long __user *)buf; |
| 1740 | |
| 1741 | nb = 0; |
| 1742 | while (nb + sizeof(hdr) + HPTE_SIZE < count) { |
| 1743 | /* Initialize header */ |
| 1744 | hptr = (struct kvm_get_htab_header __user *)buf; |
| 1745 | hdr.n_valid = 0; |
| 1746 | hdr.n_invalid = 0; |
| 1747 | nw = nb; |
| 1748 | nb += sizeof(hdr); |
| 1749 | lbuf = (unsigned long __user *)(buf + sizeof(hdr)); |
| 1750 | |
| 1751 | /* Skip uninteresting entries, i.e. clean ones on passes after the first */
| 1752 | if (!first_pass) { |
| 1753 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
| 1754 | !hpte_dirty(revp, hptp)) { |
| 1755 | ++i; |
| 1756 | hptp += 2; |
| 1757 | ++revp; |
| 1758 | } |
| 1759 | } |
| 1760 | hdr.index = i; |
| 1761 | |
| 1762 | /* Grab a series of valid entries */ |
| 1763 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
| 1764 | hdr.n_valid < 0xffff && |
| 1765 | nb + HPTE_SIZE < count && |
| 1766 | record_hpte(flags, hptp, hpte, revp, 1, first_pass)) { |
| 1767 | /* valid entry, write it out */ |
| 1768 | ++hdr.n_valid; |
| 1769 | if (__put_user(hpte[0], lbuf) || |
| 1770 | __put_user(hpte[1], lbuf + 1)) |
| 1771 | return -EFAULT; |
| 1772 | nb += HPTE_SIZE; |
| 1773 | lbuf += 2; |
| 1774 | ++i; |
| 1775 | hptp += 2; |
| 1776 | ++revp; |
| 1777 | } |
| 1778 | /* Now skip invalid entries while we can */ |
| 1779 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
| 1780 | hdr.n_invalid < 0xffff && |
| 1781 | record_hpte(flags, hptp, hpte, revp, 0, first_pass)) { |
| 1782 | /* found an invalid entry */ |
| 1783 | ++hdr.n_invalid; |
| 1784 | ++i; |
| 1785 | hptp += 2; |
| 1786 | ++revp; |
| 1787 | } |
| 1788 | |
| 1789 | if (hdr.n_valid || hdr.n_invalid) { |
| 1790 | /* write back the header */ |
| 1791 | if (__copy_to_user(hptr, &hdr, sizeof(hdr))) |
| 1792 | return -EFAULT; |
| 1793 | nw = nb; |
| 1794 | buf = (char __user *)lbuf; |
| 1795 | } else { |
| 1796 | nb = nw; |
| 1797 | } |
| 1798 | |
| 1799 | /* Check if we've wrapped around the hash table */ |
| 1800 | if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { |
| 1801 | i = 0; |
| 1802 | ctx->first_pass = 0; |
| 1803 | break; |
| 1804 | } |
| 1805 | } |
| 1806 | |
| 1807 | ctx->index = i; |
| 1808 | |
| 1809 | return nb; |
| 1810 | } |
| 1811 | |
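/*
* write() handler for the htab file descriptor: consumes the same
* header + HPTE stream that kvm_htab_read() produces.  Each valid
* entry is installed with an H_EXACT-style H_ENTER after removing
* whatever was at that index, the following hdr.n_invalid slots are
* cleared, and if the MMU was not already set up the first VRMA entry
* seen is used to recompute the VRMA SLB value and the LPCR/partition
* table setup.  Returns -EBUSY rather than pulling the MMU out from
* under vcpus that are already running.
*/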
| 1812 | static ssize_t kvm_htab_write(struct file *file, const char __user *buf, |
| 1813 | size_t count, loff_t *ppos) |
| 1814 | { |
| 1815 | struct kvm_htab_ctx *ctx = file->private_data; |
| 1816 | struct kvm *kvm = ctx->kvm; |
| 1817 | struct kvm_get_htab_header hdr; |
| 1818 | unsigned long i, j; |
| 1819 | unsigned long v, r; |
| 1820 | unsigned long __user *lbuf; |
| 1821 | __be64 *hptp; |
| 1822 | unsigned long tmp[2]; |
| 1823 | ssize_t nb; |
| 1824 | long int err, ret; |
| 1825 | int mmu_ready; |
| 1826 | int pshift; |
| 1827 | |
| 1828 | if (!access_ok(buf, count)) |
| 1829 | return -EFAULT; |
| 1830 | if (kvm_is_radix(kvm)) |
| 1831 | return -EINVAL; |
| 1832 | |
| 1833 | /* lock out vcpus from running while we're doing this */ |
| 1834 | mutex_lock(&kvm->arch.mmu_setup_lock); |
| 1835 | mmu_ready = kvm->arch.mmu_ready; |
| 1836 | if (mmu_ready) { |
| 1837 | kvm->arch.mmu_ready = 0; /* temporarily */ |
| 1838 | /* order mmu_ready vs. vcpus_running */ |
| 1839 | smp_mb(); |
| 1840 | if (atomic_read(&kvm->arch.vcpus_running)) { |
| 1841 | kvm->arch.mmu_ready = 1; |
| 1842 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
| 1843 | return -EBUSY; |
| 1844 | } |
| 1845 | } |
| 1846 | |
| 1847 | err = 0; |
| 1848 | for (nb = 0; nb + sizeof(hdr) <= count; ) { |
| 1849 | err = -EFAULT; |
| 1850 | if (__copy_from_user(&hdr, buf, sizeof(hdr))) |
| 1851 | break; |
| 1852 | |
| 1853 | err = 0; |
| 1854 | if (nb + hdr.n_valid * HPTE_SIZE > count) |
| 1855 | break; |
| 1856 | |
| 1857 | nb += sizeof(hdr); |
| 1858 | buf += sizeof(hdr); |
| 1859 | |
| 1860 | err = -EINVAL; |
| 1861 | i = hdr.index; |
| 1862 | if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || |
| 1863 | i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) |
| 1864 | break; |
| 1865 | |
| 1866 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
| 1867 | lbuf = (unsigned long __user *)buf; |
| 1868 | for (j = 0; j < hdr.n_valid; ++j) { |
| 1869 | __be64 hpte_v; |
| 1870 | __be64 hpte_r; |
| 1871 | |
| 1872 | err = -EFAULT; |
| 1873 | if (__get_user(hpte_v, lbuf) || |
| 1874 | __get_user(hpte_r, lbuf + 1)) |
| 1875 | goto out; |
| 1876 | v = be64_to_cpu(hpte_v); |
| 1877 | r = be64_to_cpu(hpte_r); |
| 1878 | err = -EINVAL; |
| 1879 | if (!(v & HPTE_V_VALID)) |
| 1880 | goto out; |
| 1881 | pshift = kvmppc_hpte_base_page_shift(v, r); |
| 1882 | if (pshift <= 0) |
| 1883 | goto out; |
| 1884 | lbuf += 2; |
| 1885 | nb += HPTE_SIZE; |
| 1886 | |
| 1887 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
| 1888 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
| 1889 | err = -EIO; |
| 1890 | ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, |
| 1891 | tmp); |
| 1892 | if (ret != H_SUCCESS) { |
| 1893 | pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r); |
| 1894 | goto out; |
| 1895 | } |
| 1896 | if (!mmu_ready && is_vrma_hpte(v)) { |
| 1897 | unsigned long senc, lpcr; |
| 1898 | |
| 1899 | senc = slb_pgsize_encoding(1ul << pshift); |
| 1900 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
| 1901 | (VRMA_VSID << SLB_VSID_SHIFT_1T); |
| 1902 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { |
| 1903 | lpcr = senc << (LPCR_VRMASD_SH - 4); |
| 1904 | kvmppc_update_lpcr(kvm, lpcr, |
| 1905 | LPCR_VRMASD); |
| 1906 | } else { |
| 1907 | kvmppc_setup_partition_table(kvm); |
| 1908 | } |
| 1909 | mmu_ready = 1; |
| 1910 | } |
| 1911 | ++i; |
| 1912 | hptp += 2; |
| 1913 | } |
| 1914 | |
| 1915 | for (j = 0; j < hdr.n_invalid; ++j) { |
| 1916 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
| 1917 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
| 1918 | ++i; |
| 1919 | hptp += 2; |
| 1920 | } |
| 1921 | err = 0; |
| 1922 | } |
| 1923 | |
| 1924 | out: |
| 1925 | /* Order HPTE updates vs. mmu_ready */ |
| 1926 | smp_wmb(); |
| 1927 | kvm->arch.mmu_ready = mmu_ready; |
| 1928 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
| 1929 | |
| 1930 | if (err) |
| 1931 | return err; |
| 1932 | return nb; |
| 1933 | } |
| 1934 | |
| 1935 | static int kvm_htab_release(struct inode *inode, struct file *filp) |
| 1936 | { |
| 1937 | struct kvm_htab_ctx *ctx = filp->private_data; |
| 1938 | |
| 1939 | filp->private_data = NULL; |
| 1940 | if (!(ctx->flags & KVM_GET_HTAB_WRITE)) |
| 1941 | atomic_dec(&ctx->kvm->arch.hpte_mod_interest); |
| 1942 | kvm_put_kvm(ctx->kvm); |
| 1943 | kfree(ctx); |
| 1944 | return 0; |
| 1945 | } |
| 1946 | |
| 1947 | static const struct file_operations kvm_htab_fops = { |
| 1948 | .read = kvm_htab_read, |
| 1949 | .write = kvm_htab_write, |
| 1950 | .llseek = default_llseek, |
| 1951 | .release = kvm_htab_release, |
| 1952 | }; |
| 1953 | |
| 1954 | int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) |
| 1955 | { |
| 1956 | int ret; |
| 1957 | struct kvm_htab_ctx *ctx; |
| 1958 | int rwflag; |
| 1959 | |
| 1960 | /* reject flags we don't recognize */ |
| 1961 | if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE)) |
| 1962 | return -EINVAL; |
| 1963 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 1964 | if (!ctx) |
| 1965 | return -ENOMEM; |
| 1966 | kvm_get_kvm(kvm); |
| 1967 | ctx->kvm = kvm; |
| 1968 | ctx->index = ghf->start_index; |
| 1969 | ctx->flags = ghf->flags; |
| 1970 | ctx->first_pass = 1; |
| 1971 | |
| 1972 | rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; |
| 1973 | ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC); |
| 1974 | if (ret < 0) { |
| 1975 | kfree(ctx); |
| 1976 | kvm_put_kvm_no_destroy(kvm); |
| 1977 | return ret; |
| 1978 | } |
| 1979 | |
| 1980 | if (rwflag == O_RDONLY) { |
| 1981 | mutex_lock(&kvm->slots_lock); |
| 1982 | atomic_inc(&kvm->arch.hpte_mod_interest); |
| 1983 | /* make sure kvmppc_do_h_enter etc. see the increment */ |
| 1984 | synchronize_srcu_expedited(&kvm->srcu); |
| 1985 | mutex_unlock(&kvm->slots_lock); |
| 1986 | } |
| 1987 | |
| 1988 | return ret; |
| 1989 | } |
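
/*
* Illustrative userspace sketch, not part of this file: the descriptor
* served by kvm_htab_fops is obtained with the KVM_PPC_GET_HTAB_FD vm
* ioctl (vm_fd below is assumed to be an open KVM VM descriptor):
*
*	struct kvm_get_htab_fd ghf = {
*		.flags = 0,		(0 selects the read/dump direction)
*		.start_index = 0,
*	};
*	int htab_fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
*
* Passing KVM_GET_HTAB_WRITE in flags instead yields a descriptor that
* feeds kvm_htab_write() for restoring a saved HPT.
*/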
| 1990 | |
| 1991 | struct debugfs_htab_state {
| 1992 | struct kvm *kvm;
| 1993 | struct mutex mutex;	/* serializes readers of this file */
| 1994 | unsigned long hpt_index;	/* next HPT slot to format */
| 1995 | int chars_left;	/* unconsumed bytes remaining in buf */
| 1996 | int buf_index;	/* offset of first unconsumed byte in buf */
| 1997 | char buf[64];	/* one formatted HPTE line */
| 1998 | };
| 1999 | |
| 2000 | static int debugfs_htab_open(struct inode *inode, struct file *file) |
| 2001 | { |
| 2002 | struct kvm *kvm = inode->i_private; |
| 2003 | struct debugfs_htab_state *p; |
| 2004 | |
| 2005 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
| 2006 | if (!p) |
| 2007 | return -ENOMEM; |
| 2008 | |
| 2009 | kvm_get_kvm(kvm); |
| 2010 | p->kvm = kvm; |
| 2011 | mutex_init(&p->mutex); |
| 2012 | file->private_data = p; |
| 2013 | |
| 2014 | return nonseekable_open(inode, file); |
| 2015 | } |
| 2016 | |
| 2017 | static int debugfs_htab_release(struct inode *inode, struct file *file) |
| 2018 | { |
| 2019 | struct debugfs_htab_state *p = file->private_data; |
| 2020 | |
| 2021 | kvm_put_kvm(p->kvm); |
| 2022 | kfree(p); |
| 2023 | return 0; |
| 2024 | } |
| 2025 | |
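/*
* Dump the HPT through debugfs: each valid or absent entry is emitted
* as one "index v hr gr" line of hex fields (the HPTE's first and
* second doublewords plus the guest_rpte shadow).  A partially
* consumed line is parked in p->buf / p->buf_index so that small
* reads resume in the middle of it.
*/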
| 2026 | static ssize_t debugfs_htab_read(struct file *file, char __user *buf, |
| 2027 | size_t len, loff_t *ppos) |
| 2028 | { |
| 2029 | struct debugfs_htab_state *p = file->private_data; |
| 2030 | ssize_t ret, r; |
| 2031 | unsigned long i, n; |
| 2032 | unsigned long v, hr, gr; |
| 2033 | struct kvm *kvm; |
| 2034 | __be64 *hptp; |
| 2035 | |
| 2036 | kvm = p->kvm; |
| 2037 | if (kvm_is_radix(kvm)) |
| 2038 | return 0; |
| 2039 | |
| 2040 | ret = mutex_lock_interruptible(&p->mutex); |
| 2041 | if (ret) |
| 2042 | return ret; |
| 2043 | |
| 2044 | if (p->chars_left) { |
| 2045 | n = p->chars_left; |
| 2046 | if (n > len) |
| 2047 | n = len; |
| 2048 | r = copy_to_user(buf, p->buf + p->buf_index, n); |
| 2049 | n -= r; |
| 2050 | p->chars_left -= n; |
| 2051 | p->buf_index += n; |
| 2052 | buf += n; |
| 2053 | len -= n; |
| 2054 | ret = n; |
| 2055 | if (r) { |
| 2056 | if (!n) |
| 2057 | ret = -EFAULT; |
| 2058 | goto out; |
| 2059 | } |
| 2060 | } |
| 2061 | |
| 2062 | i = p->hpt_index; |
| 2063 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
| 2064 | for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); |
| 2065 | ++i, hptp += 2) { |
| 2066 | if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))) |
| 2067 | continue; |
| 2068 | |
| 2069 | /* lock the HPTE so it's stable and read it */ |
| 2070 | preempt_disable(); |
| 2071 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) |
| 2072 | cpu_relax(); |
| 2073 | v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK; |
| 2074 | hr = be64_to_cpu(hptp[1]); |
| 2075 | gr = kvm->arch.hpt.rev[i].guest_rpte; |
| 2076 | unlock_hpte(hptp, v); |
| 2077 | preempt_enable(); |
| 2078 | |
| 2079 | if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) |
| 2080 | continue; |
| 2081 | |
| 2082 | n = scnprintf(p->buf, sizeof(p->buf), |
| 2083 | "%6lx %.16lx %.16lx %.16lx\n", |
| 2084 | i, v, hr, gr); |
| 2085 | p->chars_left = n; |
| 2086 | if (n > len) |
| 2087 | n = len; |
| 2088 | r = copy_to_user(buf, p->buf, n); |
| 2089 | n -= r; |
| 2090 | p->chars_left -= n; |
| 2091 | p->buf_index = n; |
| 2092 | buf += n; |
| 2093 | len -= n; |
| 2094 | ret += n; |
| 2095 | if (r) { |
| 2096 | if (!ret) |
| 2097 | ret = -EFAULT; |
| 2098 | goto out; |
| 2099 | } |
| 2100 | } |
| 2101 | p->hpt_index = i; |
| 2102 | |
| 2103 | out: |
| 2104 | mutex_unlock(&p->mutex); |
| 2105 | return ret; |
| 2106 | } |
| 2107 | |
| 2108 | static ssize_t debugfs_htab_write(struct file *file, const char __user *buf, |
| 2109 | size_t len, loff_t *ppos) |
| 2110 | { |
| 2111 | return -EACCES; |
| 2112 | } |
| 2113 | |
| 2114 | static const struct file_operations debugfs_htab_fops = { |
| 2115 | .owner = THIS_MODULE, |
| 2116 | .open = debugfs_htab_open, |
| 2117 | .release = debugfs_htab_release, |
| 2118 | .read = debugfs_htab_read, |
| 2119 | .write = debugfs_htab_write, |
| 2120 | .llseek = generic_file_llseek, |
| 2121 | }; |
| 2122 | |
| 2123 | void kvmppc_mmu_debugfs_init(struct kvm *kvm) |
| 2124 | { |
| 2125 | debugfs_create_file("htab", 0400, kvm->debugfs_dentry, kvm, |
| 2126 | &debugfs_htab_fops); |
| 2127 | } |
| 2128 | |
| 2129 | void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) |
| 2130 | { |
| 2131 | struct kvmppc_mmu *mmu = &vcpu->arch.mmu; |
| 2132 | |
| 2133 | vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ |
| 2134 | |
| 2135 | mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; |
| 2136 | |
| 2137 | vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; |
| 2138 | } |