1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * native hashtable management.
5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
11 #include <linux/spinlock.h>
12 #include <linux/bitops.h>
14 #include <linux/processor.h>
15 #include <linux/threads.h>
16 #include <linux/smp.h>
17 #include <linux/pgtable.h>
19 #include <asm/machdep.h>
21 #include <asm/mmu_context.h>
22 #include <asm/trace.h>
24 #include <asm/cputable.h>
26 #include <asm/kexec.h>
27 #include <asm/ppc-opcode.h>
28 #include <asm/feature-fixups.h>
30 #include <misc/cxl-base.h>
33 #define DBG_LOW(fmt...) udbg_printf(fmt)
35 #define DBG_LOW(fmt...)
39 #define HPTE_LOCK_BIT 3
41 #define HPTE_LOCK_BIT (56+3)
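/*
 * The lock is the software bit HPTE_V_LOCK (0x8) in the HPTE's first
 * doubleword. The bitops below treat that doubleword as a native
 * unsigned long while the HPTE itself is stored big-endian, so on a
 * little-endian kernel the same architected bit sits at position 56 + 3
 * of the loaded value.
 */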
44 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
47 static struct lockdep_map hpte_lock_map =
48 STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);
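/*
 * One dummy lockdep map stands in for every per-HPTE bit lock, so lock
 * ordering against the HPTE locks is still checked when lockdep is
 * enabled even though the lock word itself lives inside the HPTE.
 */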
50 static void acquire_hpte_lock(void)
52 lock_map_acquire(&hpte_lock_map);
55 static void release_hpte_lock(void)
57 lock_map_release(&hpte_lock_map);
60 static void acquire_hpte_lock(void)
64 static void release_hpte_lock(void)
69 static inline unsigned long ___tlbie(unsigned long vpn, int psize,
70 int apsize, int ssize)
77 * We need 14 to 65 bits of va for a tlbie of a 4K page.
78 * With vpn we ignore the lower VPN_SHIFT bits already.
79 * And the top two bits are already ignored because we can
80 * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
83 va = vpn << VPN_SHIFT;
85 * clear the top 16 bits of the 64-bit va, non SLS segment
86 * Older versions of the architecture (2.02 and earlier) require the
87 * masking of the top 16 bits.
89 if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
90 va &= ~(0xffffULL << 48);
94 /* clear out bits after (52) [0....52.....63] */
95 va &= ~((1ul << (64 - 52)) - 1);
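/* i.e. drop the 4K byte offset; tlbie only needs the page number here */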
97 sllp = get_sllp_encoding(apsize);
99 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
100 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
104 /* We need 14 to 14 + i bits of va */
105 penc = mmu_psize_defs[psize].penc[apsize];
106 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
111 * We don't need all the bits, but the rest of the bits
112 * must be ignored by the processor.
113 * vpn covers up to 65 bits of va (0...65) and we need
116 va |= (vpn & 0xfe); /* AVAL */
118 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
119 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
126 static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
127 int apsize, int ssize)
129 if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
130 /* Radix flush for a hash guest */
132 unsigned long rb, rs, prs, r, ric;
134 rb = PPC_BIT(52); /* IS = 2 */
135 rs = 0; /* lpid = 0 */
136 prs = 0; /* partition scoped */
137 r = 1; /* radix format */
138 ric = 0; /* RIC_FLUSH_TLB */
141 * Need the extra ptesync to make sure we don't
144 asm volatile("ptesync": : :"memory");
145 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
146 : : "r"(rb), "i"(r), "i"(prs),
147 "i"(ric), "r"(rs) : "memory");
151 if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
152 /* Need the extra ptesync to ensure we don't reorder tlbie */
153 asm volatile("ptesync": : :"memory");
154 ___tlbie(vpn, psize, apsize, ssize);
158 static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
162 rb = ___tlbie(vpn, psize, apsize, ssize);
163 trace_tlbie(0, 0, rb, 0, 0, 0, 0);
166 static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
172 /* VPN_SHIFT can be at most 12 */
173 va = vpn << VPN_SHIFT;
175 * clear the top 16 bits of the 64-bit va, non SLS segment
176 * Older versions of the architecture (2.02 and earlier) require the
177 * masking of the top 16 bits.
179 if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
180 va &= ~(0xffffULL << 48);
184 /* clear out bits after (52) [0....52.....63] */
185 va &= ~((1ul << (64 - 52)) - 1);
187 sllp = get_sllp_encoding(apsize);
189 asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
190 : : "r" (va), "i" (CPU_FTR_ARCH_206)
194 /* We need 14 to 14 + i bits of va */
195 penc = mmu_psize_defs[psize].penc[apsize];
196 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
201 * We don't need all the bits, but the rest of the bits
202 * must be ignored by the processor.
203 * vpn covers up to 65 bits of va (0...65) and we need
208 asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
209 : : "r" (va), "i" (CPU_FTR_ARCH_206)
213 trace_tlbie(0, 1, va, 0, 0, 0, 0);
217 static inline void tlbie(unsigned long vpn, int psize, int apsize,
218 int ssize, int local)
220 unsigned int use_local;
221 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
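/*
 * Older processors only allow one tlbie to be in flight system-wide, so
 * without MMU_FTR_LOCKLESS_TLBIE global invalidations have to be
 * serialised with native_tlbie_lock.
 */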
223 use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
226 use_local = mmu_psize_defs[psize].tlbiel;
227 if (lock_tlbie && !use_local)
228 raw_spin_lock(&native_tlbie_lock);
229 asm volatile("ptesync": : :"memory");
231 __tlbiel(vpn, psize, apsize, ssize);
232 ppc_after_tlbiel_barrier();
234 __tlbie(vpn, psize, apsize, ssize);
235 fixup_tlbie_vpn(vpn, psize, apsize, ssize);
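/*
 * Completion sequence: eieio orders the tlbie(s) ahead of the tlbsync,
 * tlbsync waits until the invalidation has been performed by all
 * processors, and the final ptesync orders that completion before any
 * following storage accesses.
 */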
236 asm volatile("eieio; tlbsync; ptesync": : :"memory");
238 if (lock_tlbie && !use_local)
239 raw_spin_unlock(&native_tlbie_lock);
242 static inline void native_lock_hpte(struct hash_pte *hptep)
244 unsigned long *word = (unsigned long *)&hptep->v;
248 if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
251 while (test_bit(HPTE_LOCK_BIT, word))
257 static inline void native_unlock_hpte(struct hash_pte *hptep)
259 unsigned long *word = (unsigned long *)&hptep->v;
262 clear_bit_unlock(HPTE_LOCK_BIT, word);
265 static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
266 unsigned long pa, unsigned long rflags,
267 unsigned long vflags, int psize, int apsize, int ssize)
269 struct hash_pte *hptep = htab_address + hpte_group;
270 unsigned long hpte_v, hpte_r;
274 local_irq_save(flags);
276 if (!(vflags & HPTE_V_BOLTED)) {
277 DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
278 " rflags=%lx, vflags=%lx, psize=%d)\n",
279 hpte_group, vpn, pa, rflags, vflags, psize);
282 for (i = 0; i < HPTES_PER_GROUP; i++) {
283 if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
284 /* retry with lock held */
285 native_lock_hpte(hptep);
286 if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
288 native_unlock_hpte(hptep);
294 if (i == HPTES_PER_GROUP) {
295 local_irq_restore(flags);
299 hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
300 hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
302 if (!(vflags & HPTE_V_BOLTED)) {
303 DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
307 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
308 hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
309 hpte_v = hpte_old_to_new_v(hpte_v);
312 hptep->r = cpu_to_be64(hpte_r);
313 /* Guarantee the second dword is visible before the valid bit */
316 * Now set the first dword including the valid bit
317 * NOTE: this also unlocks the hpte
320 hptep->v = cpu_to_be64(hpte_v);
322 __asm__ __volatile__ ("ptesync" : : : "memory");
324 local_irq_restore(flags);
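/*
 * Return the slot in the form callers keep in the PTE's hidx: bits 0-2
 * (_PTEIDX_GROUP_IX) are the entry within the PTEG and bit 3
 * (_PTEIDX_SECONDARY) records that the secondary hash was used.
 */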
326 return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
329 static long native_hpte_remove(unsigned long hpte_group)
331 struct hash_pte *hptep;
334 unsigned long hpte_v;
336 DBG_LOW(" remove(group=%lx)\n", hpte_group);
338 /* pick a random entry to start at */
339 slot_offset = mftb() & 0x7;
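/*
 * The timebase low bits are a cheap source of pseudo-randomness; starting
 * at a different slot each time spreads evictions across the group.
 */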
341 for (i = 0; i < HPTES_PER_GROUP; i++) {
342 hptep = htab_address + hpte_group + slot_offset;
343 hpte_v = be64_to_cpu(hptep->v);
345 if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
346 /* retry with lock held */
347 native_lock_hpte(hptep);
348 hpte_v = be64_to_cpu(hptep->v);
349 if ((hpte_v & HPTE_V_VALID)
350 && !(hpte_v & HPTE_V_BOLTED))
352 native_unlock_hpte(hptep);
359 if (i == HPTES_PER_GROUP)
362 /* Invalidate the hpte. NOTE: this also unlocks it */
369 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
370 unsigned long vpn, int bpsize,
371 int apsize, int ssize, unsigned long flags)
373 struct hash_pte *hptep = htab_address + slot;
374 unsigned long hpte_v, want_v;
375 int ret = 0, local = 0;
376 unsigned long irqflags;
378 local_irq_save(irqflags);
380 want_v = hpte_encode_avpn(vpn, bpsize, ssize);
382 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
383 vpn, want_v & HPTE_V_AVPN, slot, newpp);
385 hpte_v = hpte_get_old_v(hptep);
387 * We always need to invalidate the TLB because hpte_remove doesn't do
388 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or less
389 * random entry from it. When we do that we don't invalidate the TLB
390 * (hpte_remove) because we assume the old translation is still
391 * technically "valid".
393 if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
394 DBG_LOW(" -> miss\n");
397 native_lock_hpte(hptep);
398 /* recheck with locks held */
399 hpte_v = hpte_get_old_v(hptep);
400 if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
401 !(hpte_v & HPTE_V_VALID))) {
404 DBG_LOW(" -> hit\n");
405 /* Update the HPTE */
406 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
407 ~(HPTE_R_PPP | HPTE_R_N)) |
408 (newpp & (HPTE_R_PPP | HPTE_R_N |
411 native_unlock_hpte(hptep);
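/*
 * HPTE_LOCAL_UPDATE tells us a local (tlbiel) flush is sufficient;
 * HPTE_NOHPTE_UPDATE means the fault was taken with no pre-existing
 * HPTE, so there is no stale translation to flush at all.
 */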
414 if (flags & HPTE_LOCAL_UPDATE)
417 * Ensure it is out of the tlb too if it is not a nohpte fault
419 if (!(flags & HPTE_NOHPTE_UPDATE))
420 tlbie(vpn, bpsize, apsize, ssize, local);
422 local_irq_restore(irqflags);
427 static long __native_hpte_find(unsigned long want_v, unsigned long slot)
429 struct hash_pte *hptep;
430 unsigned long hpte_v;
433 for (i = 0; i < HPTES_PER_GROUP; i++) {
435 hptep = htab_address + slot;
436 hpte_v = hpte_get_old_v(hptep);
437 if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
446 static long native_hpte_find(unsigned long vpn, int psize, int ssize)
448 unsigned long hpte_group;
449 unsigned long want_v;
453 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
454 want_v = hpte_encode_avpn(vpn, psize, ssize);
457 * We try to keep bolted entries always in the primary hash,
458 * but in some cases we can find them in the secondary too.
460 hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
461 slot = __native_hpte_find(want_v, hpte_group);
463 /* Try in secondary */
464 hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
465 slot = __native_hpte_find(want_v, hpte_group);
474 * Update the page protection bits. Intended to be used to create
475 * guard pages for kernel data structures on pages which are bolted
476 * in the HPT. Assumes pages being operated on will not be stolen.
478 * No need to lock here because we should be the only user.
480 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
481 int psize, int ssize)
486 struct hash_pte *hptep;
489 local_irq_save(flags);
491 vsid = get_kernel_vsid(ea, ssize);
492 vpn = hpt_vpn(ea, vsid, ssize);
494 slot = native_hpte_find(vpn, psize, ssize);
496 panic("could not find page to bolt\n");
497 hptep = htab_address + slot;
499 /* Update the HPTE */
500 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
501 ~(HPTE_R_PPP | HPTE_R_N)) |
502 (newpp & (HPTE_R_PPP | HPTE_R_N)));
504 * Ensure it is out of the tlb too. Bolted entries' base and
505 * actual page size will be the same.
507 tlbie(vpn, psize, psize, ssize, 0);
509 local_irq_restore(flags);
513 * Remove a bolted kernel entry. Memory hotplug uses this.
515 * No need to lock here because we should be the only user.
517 static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
522 struct hash_pte *hptep;
525 local_irq_save(flags);
527 vsid = get_kernel_vsid(ea, ssize);
528 vpn = hpt_vpn(ea, vsid, ssize);
530 slot = native_hpte_find(vpn, psize, ssize);
534 hptep = htab_address + slot;
536 VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));
538 /* Invalidate the hpte */
541 /* Invalidate the TLB */
542 tlbie(vpn, psize, psize, ssize, 0);
544 local_irq_restore(flags);
550 static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
551 int bpsize, int apsize, int ssize, int local)
553 struct hash_pte *hptep = htab_address + slot;
554 unsigned long hpte_v;
555 unsigned long want_v;
558 local_irq_save(flags);
560 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
562 want_v = hpte_encode_avpn(vpn, bpsize, ssize);
563 hpte_v = hpte_get_old_v(hptep);
565 if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
566 native_lock_hpte(hptep);
567 /* recheck with locks held */
568 hpte_v = hpte_get_old_v(hptep);
570 if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
571 /* Invalidate the hpte. NOTE: this also unlocks it */
575 native_unlock_hpte(hptep);
578 * We always need to invalidate the TLB because hpte_remove doesn't do
579 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or less
580 * random entry from it. When we do that we don't invalidate the TLB
581 * (hpte_remove) because we assume the old translation is still
582 * technically "valid".
584 tlbie(vpn, bpsize, apsize, ssize, local);
586 local_irq_restore(flags);
589 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
590 static void native_hugepage_invalidate(unsigned long vsid,
592 unsigned char *hpte_slot_array,
593 int psize, int ssize, int local)
596 struct hash_pte *hptep;
597 int actual_psize = MMU_PAGE_16M;
598 unsigned int max_hpte_count, valid;
599 unsigned long flags, s_addr = addr;
600 unsigned long hpte_v, want_v, shift;
601 unsigned long hidx, vpn = 0, hash, slot;
603 shift = mmu_psize_defs[psize].shift;
604 max_hpte_count = 1U << (PMD_SHIFT - shift);
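/*
 * e.g. a 16M THP (PMD_SHIFT = 24) backed by 64K hash pages (shift = 16)
 * has at most 1 << 8 = 256 HPTEs to tear down.
 */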
606 local_irq_save(flags);
607 for (i = 0; i < max_hpte_count; i++) {
608 valid = hpte_valid(hpte_slot_array, i);
611 hidx = hpte_hash_index(hpte_slot_array, i);
614 addr = s_addr + (i * (1ul << shift));
615 vpn = hpt_vpn(addr, vsid, ssize);
616 hash = hpt_hash(vpn, shift, ssize);
617 if (hidx & _PTEIDX_SECONDARY)
620 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
621 slot += hidx & _PTEIDX_GROUP_IX;
623 hptep = htab_address + slot;
624 want_v = hpte_encode_avpn(vpn, psize, ssize);
625 hpte_v = hpte_get_old_v(hptep);
627 /* Even if we miss, we need to invalidate the TLB */
628 if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
629 /* recheck with locks held */
630 native_lock_hpte(hptep);
631 hpte_v = hpte_get_old_v(hptep);
633 if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
634 /* Invalidate the hpte. NOTE: this also unlocks it */
638 native_unlock_hpte(hptep);
641 * We need to do a tlb invalidate for every address; the tlbie
642 * instruction compares the entry's VA in the TLB with the VA specified
645 tlbie(vpn, psize, actual_psize, ssize, local);
647 local_irq_restore(flags);
650 static void native_hugepage_invalidate(unsigned long vsid,
652 unsigned char *hpte_slot_array,
653 int psize, int ssize, int local)
655 WARN(1, "%s called without THP support\n", __func__);
659 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
660 int *psize, int *apsize, int *ssize, unsigned long *vpn)
662 unsigned long avpn, pteg, vpi;
663 unsigned long hpte_v = be64_to_cpu(hpte->v);
664 unsigned long hpte_r = be64_to_cpu(hpte->r);
665 unsigned long vsid, seg_off;
666 int size, a_size, shift;
667 /* Look at the 8 bit LP value */
668 unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
670 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
671 hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
672 hpte_r = hpte_new_to_old_r(hpte_r);
674 if (!(hpte_v & HPTE_V_LARGE)) {
676 a_size = MMU_PAGE_4K;
678 size = hpte_page_sizes[lp] & 0xf;
679 a_size = hpte_page_sizes[lp] >> 4;
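/*
 * hpte_page_sizes[] packs two MMU page size indices per LP value: the
 * low nibble is the base page size the entry was hashed with, the high
 * nibble the actual page size.
 */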
681 /* This works for all page sizes, and for 256M and 1T segments */
682 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
683 shift = mmu_psize_defs[size].shift;
685 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
686 pteg = slot / HPTES_PER_GROUP;
687 if (hpte_v & HPTE_V_SECONDARY)
691 case MMU_SEGSIZE_256M:
692 /* We only have 28 - 23 bits of seg_off in avpn */
693 seg_off = (avpn & 0x1f) << 23;
695 /* We can find more bits from the pteg value */
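/*
 * For 256M segments hash = vsid ^ (seg_off >> shift), so XORing the PTEG
 * index with the VSID recovers the seg_off bits that the AVPN does not
 * hold (only useful when shift < 23, i.e. below the 23 bits of seg_off
 * the AVPN already provides).
 */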
697 vpi = (vsid ^ pteg) & htab_hash_mask;
698 seg_off |= vpi << shift;
700 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
703 /* We only have 40 - 23 bits of seg_off in avpn */
704 seg_off = (avpn & 0x1ffff) << 23;
707 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
708 seg_off |= vpi << shift;
710 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
720 * clear all mappings on kexec. All cpus are in real mode (or they will
721 * be when they isi), and we are the only one left. We rely on our kernel
722 * mapping being 0xC0's and the hardware ignoring those two real bits.
724 * This must be called with interrupts disabled.
726 * Taking the native_tlbie_lock is unsafe here due to the possibility of
727 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
728 * cause a deadlock; on POWER5 and newer, not taking the lock is fine. This
729 * only gets called during boot, before secondary CPUs have come up, and
730 * during crashdump, when all bets are off anyway.
732 * TODO: add batching support when enabled. Remember, no dynamic memory here,
733 * although there is the control page available...
735 static notrace void native_hpte_clear(void)
737 unsigned long vpn = 0;
738 unsigned long slot, slots;
739 struct hash_pte *hptep = htab_address;
740 unsigned long hpte_v;
741 unsigned long pteg_count;
742 int psize, apsize, ssize;
744 pteg_count = htab_hash_mask + 1;
746 slots = pteg_count * HPTES_PER_GROUP;
748 for (slot = 0; slot < slots; slot++, hptep++) {
750 * We could lock the pte here, but we are the only cpu
751 * running, right? And for crash dump, we probably
752 * don't want to wait for a possibly bad cpu.
754 hpte_v = be64_to_cpu(hptep->v);
757 * Call __tlbie() here rather than tlbie() since we can't take the
760 if (hpte_v & HPTE_V_VALID) {
761 hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
763 ___tlbie(vpn, psize, apsize, ssize);
767 asm volatile("eieio; tlbsync; ptesync":::"memory");
771 * Batched hash table flush: we batch the tlbies to avoid taking and
772 * releasing the lock all the time.
774 static void native_flush_hash_range(unsigned long number, int local)
776 unsigned long vpn = 0;
777 unsigned long hash, index, hidx, shift, slot;
778 struct hash_pte *hptep;
779 unsigned long hpte_v;
780 unsigned long want_v;
783 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
784 unsigned long psize = batch->psize;
785 int ssize = batch->ssize;
787 unsigned int use_local;
789 use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
790 mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
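/*
 * tlbiel can only be used when the caller asked for a local flush, the
 * CPU implements tlbiel for this page size, and no cxl (CAPI) contexts
 * are active; the accelerator caches translations of its own that only a
 * broadcast tlbie will shoot down.
 */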
792 local_irq_save(flags);
794 for (i = 0; i < number; i++) {
798 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
799 hash = hpt_hash(vpn, shift, ssize);
800 hidx = __rpte_to_hidx(pte, index);
801 if (hidx & _PTEIDX_SECONDARY)
803 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
804 slot += hidx & _PTEIDX_GROUP_IX;
805 hptep = htab_address + slot;
806 want_v = hpte_encode_avpn(vpn, psize, ssize);
807 hpte_v = hpte_get_old_v(hptep);
809 if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
811 /* lock and try again */
812 native_lock_hpte(hptep);
813 hpte_v = hpte_get_old_v(hptep);
815 if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
816 native_unlock_hpte(hptep);
822 } pte_iterate_hashed_end();
826 asm volatile("ptesync":::"memory");
827 for (i = 0; i < number; i++) {
831 pte_iterate_hashed_subpages(pte, psize,
833 __tlbiel(vpn, psize, psize, ssize);
834 } pte_iterate_hashed_end();
836 ppc_after_tlbiel_barrier();
838 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
841 raw_spin_lock(&native_tlbie_lock);
843 asm volatile("ptesync":::"memory");
844 for (i = 0; i < number; i++) {
848 pte_iterate_hashed_subpages(pte, psize,
850 __tlbie(vpn, psize, psize, ssize);
851 } pte_iterate_hashed_end();
854 * Just do one more with the last used values.
856 fixup_tlbie_vpn(vpn, psize, psize, ssize);
857 asm volatile("eieio; tlbsync; ptesync":::"memory");
860 raw_spin_unlock(&native_tlbie_lock);
863 local_irq_restore(flags);
866 void __init hpte_init_native(void)
868 mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
869 mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
870 mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
871 mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
872 mmu_hash_ops.hpte_insert = native_hpte_insert;
873 mmu_hash_ops.hpte_remove = native_hpte_remove;
874 mmu_hash_ops.hpte_clear_all = native_hpte_clear;
875 mmu_hash_ops.flush_hash_range = native_flush_hash_range;
876 mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;