/*
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */

#define pr_fmt(fmt) "lpar: " fmt

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
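
/*
 * Per PAPR, each H_BULK_REMOVE translation specifier carries one of the
 * flags above: HBR_REQUEST marks an entry to process (with HBR_AVPN or
 * HBR_ANDCOND selecting how the PTE is matched), the hypervisor rewrites
 * it with HBR_RESPONSE plus a status code, and HBR_END terminates a
 * short parameter list.
 */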

EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
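
/*
 * Allocate a dispatch trace log buffer for each possible CPU from the
 * dtl_cache.  The buffers are registered with the hypervisor later, per
 * CPU, by register_dtl_buffer().
 */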
void alloc_dtl_buffers(void)
{
	int cpu;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
			pr_warn("Stolen time statistics will be unreliable\n");
			break;
		}

		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
	}
}
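
/*
 * Register this CPU's dispatch trace log buffer with the hypervisor
 * (via the register_dtl hcall wrapper) and enable logging of preempt
 * events.
 */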
void register_dtl_buffer(int cpu)
{
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;
	int hwcpu = get_hard_smp_processor_id(cpu);

	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
			       cpu, hwcpu, ret);

		lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
	}
}

#ifdef CONFIG_PPC_SPLPAR
DEFINE_RWLOCK(dtl_access_lock);
#endif /* CONFIG_PPC_SPLPAR */

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPARs support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	register_dtl_buffer(cpu);
}

#ifdef CONFIG_PPC_BOOK3S_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		pr_devel("Hash table group is full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/*
	 * Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
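
/*
 * Pick a slot at random within the group and try to remove it with
 * H_ANDCOND so that bolted entries are never evicted; returns the slot
 * index on success, or -1 if every entry in the group is bolted.
 */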
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}
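
/*
 * Tear down the hash page table by reading it back four PTEs at a time
 * and invalidating every valid entry that is not part of the VRMA
 * region, which must stay mapped.
 */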
static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				i, lpar_rc);
			continue;
		}
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & 7) | H_AVPN;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh, ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				hpte_group, lpar_rc);
			continue;
		}

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;

	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * As defined in the PAPR's section 14.5.4.1.8
 * The control mask doesn't include the returned reference and change bit from
 * the processed PTE.
 */
#define HBLKR_AVPN		0x0100000000000000UL
#define HBLKR_CTRL_MASK		0xf800000000000000UL
#define HBLKR_CTRL_SUCCESS	0x8000000000000000UL
#define HBLKR_CTRL_ERRNOTFOUND	0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY	0xa000000000000000UL

/*
 * H_BLOCK_REMOVE caller.
 * @idx should point to the latest @param entry set with a PTEX.
 * If a PTE cannot be processed because another CPU has already locked that
 * group, those entries are put back in @param starting at index 1.
 * If entries have to be retried and @retry_busy is set to true, these entries
 * are retried until success. If @retry_busy is set to false, the returned
 * value is the number of entries yet to process.
 */
static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
				       bool retry_busy)
{
	unsigned long i, rc, new_idx;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (idx < 2) {
		pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
		return 0;
	}
again:
	new_idx = 0;
	if (idx > PLPAR_HCALL9_BUFSIZE) {
		pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
		idx = PLPAR_HCALL9_BUFSIZE;
	} else if (idx < PLPAR_HCALL9_BUFSIZE)
		param[idx] = HBR_END;

	rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
			  param[0], /* AVA */
			  param[1], param[2], param[3], param[4], /* TS0-7 */
			  param[5], param[6], param[7], param[8]);
	if (rc == H_SUCCESS)
		return 0;

	BUG_ON(rc != H_PARTIAL);

	/* Check that the unprocessed entries were 'not found' or 'busy' */
	for (i = 0; i < idx - 1; i++) {
		unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;

		if (ctrl == HBLKR_CTRL_ERRBUSY) {
			param[++new_idx] = param[i + 1];
			continue;
		}

		BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
		       && ctrl != HBLKR_CTRL_ERRNOTFOUND);
	}

	/*
	 * If there were entries found busy, retry these entries if requested,
	 * or if all the entries have to be retried.
	 */
	if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE - 1))) {
		idx = new_idx + 1;
		goto again;
	}

	return new_idx;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12
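
/*
 * Invalidate a batch of huge-page HPTEs with H_BLOCK_REMOVE.  Slots are
 * grouped by their naturally aligned 8 page virtual address block; when
 * the block changes, or the parameter buffer fills up, the pending block
 * is flushed via call_block_remove().
 */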
static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
				      int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long shift, current_vpgb, vpgb;
	int i, pix = 0;

	shift = mmu_psize_defs[psize].shift;

	for (i = 0; i < count; i++) {
		/*
		 * Shifting 3 bits more on the right to get an
		 * 8 page aligned virtual address.
		 */
		vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
		if (!pix || vpgb != current_vpgb) {
			/*
			 * Need to start a new 8 pages block, flush
			 * the current one if needed.
			 */
			if (pix)
				(void)call_block_remove(pix, param, true);
			current_vpgb = vpgb;
			param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix = 1;
		}

		param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
		if (pix == PLPAR_HCALL9_BUFSIZE) {
			pix = call_block_remove(pix, param, false);
			/*
			 * pix = 0 means that all the entries were
			 * removed, we can start a new block.
			 * Otherwise, this means that there are entries
			 * to retry, and pix points to the latest one, so
			 * we should increment it and try to continue
			 * the same block.
			 */
			if (pix)
				pix++;
		}
	}
	if (pix)
		(void)call_block_remove(pix, param, true);
}
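
/*
 * Same as above but using H_BULK_REMOVE: entries are queued as
 * (flags, AVPN) pairs and flushed eight parameters at a time, falling
 * back to one-by-one invalidation when BULK_REMOVE is not available.
 */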
static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
				     int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}
}

static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
						       unsigned long *vpn,
						       int count, int psize,
						       int ssize)
{
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
		hugepage_block_invalidate(slot, vpn, count, psize, ssize);
	else
		hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}
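
/*
 * Work out the HPT slot number for a (pte, vpn) pair: hash the VPN,
 * flip to the secondary hash if the per-PTE hidx says so, then add the
 * slot index within the group.
 */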
static inline unsigned long compute_slot(real_pte_t pte, unsigned long vpn,
					 unsigned long index,
					 unsigned long shift, int ssize)
{
	unsigned long slot, hash, hidx;

	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(pte, index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	return slot;
}

/*
 * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
 * "all within the same naturally aligned 8 page virtual address block".
 */
static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
			    unsigned long *param)
{
	unsigned long vpn;
	unsigned long i, pix = 0;
	unsigned long index, shift, slot, current_vpgb, vpgb;
	real_pte_t pte;
	int psize, ssize;

	psize = batch->psize;
	ssize = batch->ssize;

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			/*
			 * Shifting 3 bits more on the right to get an
			 * 8 page aligned virtual address.
			 */
			vpgb = (vpn >> (shift - VPN_SHIFT + 3));
			if (!pix || vpgb != current_vpgb) {
				/*
				 * Need to start a new 8 pages block, flush
				 * the current one if needed.
				 */
				if (pix)
					(void)call_block_remove(pix, param,
								true);
				current_vpgb = vpgb;
				param[0] = hpte_encode_avpn(vpn, psize,
							    ssize);
				pix = 1;
			}

			slot = compute_slot(pte, vpn, index, shift, ssize);
			param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;

			if (pix == PLPAR_HCALL9_BUFSIZE) {
				pix = call_block_remove(pix, param, false);
				/*
				 * pix = 0 means that all the entries were
				 * removed, we can start a new block.
				 * Otherwise, this means that there are entries
				 * to retry, and pix points to the latest one,
				 * so we should increment it and try to
				 * continue the same block.
				 */
				if (pix)
					pix++;
			}
		} pte_iterate_hashed_end();
	}

	if (pix)
		(void)call_block_remove(pix, param, true);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long index, shift, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) {
		do_block_remove(number, batch, param);
		goto out;
	}

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			slot = compute_slot(pte, vpn, index, shift, ssize);
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

out:
	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		pr_info("Disabling BULK_REMOVE firmware feature");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}

	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);
#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};

static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;

	return 0;
}

/* Must be called in user context */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	might_sleep();

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	pr_info("Attempting to resize HPT to shift %lu\n", shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
					rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		/* Continue on */
		break;
	case H_PARAMETER:
		pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
		return -EINVAL;
	case H_RESOURCE:
		pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
		return -EPERM;
	default:
		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			return -ENOSPC;
		default:
			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
				state.commit_rc);
			return -EIO;
		}
	}

	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
		shift, (long long) ktime_ms_delta(t1, t0),
		(long long) ktime_ms_delta(t2, t1));

	return 0;
}

static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = 0;

	if (table_size)
		flags |= PROC_TABLE_NEW;
	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	else
		flags |= PROC_TABLE_HPT_SLB;

	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}

	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}

	return rc;
}

void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted	 = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
	register_process_table		 = pseries_lpar_register_process_table;

	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}

void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");
	register_process_table = pseries_lpar_register_process_table;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;

	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		pr_info("%s: CMO free page hinting is not active.\n", __func__);
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	pr_info("%s: CMO free page hinting is active.\n", __func__);

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);
	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);
	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/*
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}
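
/*
 * Invert the VSID scrambling done at SLB/hash insert time: given a
 * scrambled VSID, recover the protovsid by multiplying with the
 * precomputed modular inverse of the VSID multiplier, taking care not
 * to overflow 64 bits.
 */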
static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
{
	unsigned long protovsid;
	unsigned long va_bits = VA_BITS;
	unsigned long modinv, vsid_modulus;
	unsigned long max_mod_inv, tmp_modinv;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		modinv = VSID_MULINV_256M;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
	} else {
		modinv = VSID_MULINV_1T;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
	}

	/*
	 * vsid outside our range.
	 */
	if (vsid >= vsid_modulus)
		return 0;

	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
	 *	protovsid = (vsid * modinv) % vsid_modulus
	 */

	/* Check if (vsid * modinv) overflow (63 bits) */
	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % vsid_modulus;

	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
	protovsid = (protovsid + vsid * modinv) % vsid_modulus;

	return protovsid;
}

static int __init reserve_vrma_context_id(void)
{
	unsigned long protovsid;

	/*
	 * Reserve context ids which map to reserved virtual addresses. For now
	 * we only reserve the context id which maps to the VRMA VSID. We ignore
	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
	 * enable adjunct support via the "ibm,client-architecture-support"
	 * interface.
	 */
	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);

#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
			     loff_t *pos)
{
	int cpu = (long)filp->private_data;
	struct lppaca *lppaca = &lppaca_of(cpu);

	return simple_read_from_buffer(buf, len, pos, lppaca,
				sizeof(struct lppaca));
}

static const struct file_operations vpa_fops = {
	.open		= simple_open,
	.read		= vpa_file_read,
	.llseek		= default_llseek,
};

static int __init vpa_debugfs_init(void)
{
	char name[16];
	long i;
	static struct dentry *vpa_dir;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
	if (!vpa_dir) {
		pr_warn("%s: can't create vpa root dir\n", __func__);
		return -ENOMEM;
	}

	/* set up the per-cpu vpa file */
	for_each_possible_cpu(i) {
		struct dentry *d;

		sprintf(name, "cpu-%ld", i);

		d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
					&vpa_fops);
		if (!d) {
			pr_warn("%s: can't create per-cpu vpa file\n",
					__func__);
			return -ENOMEM;
		}
	}

	return 0;
}
machine_arch_initcall(pseries, vpa_debugfs_init);
#endif /* CONFIG_DEBUG_FS */