// SPDX-License-Identifier: GPL-2.0
/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

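/*
 * Low-level HPTE insertion helper shared with the regular hash fault
 * path; defined with the rest of the hash MMU code (hash_utils).
 */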
extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
				  unsigned long pa, unsigned long rflags,
				  unsigned long vflags, int psize, int ssize);

int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize)
{
	real_pte_t rpte;
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa;
	long slot, offset;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Search the Linux page table for a match with va */
	vpn = hpt_vpn(ea, vsid, ssize);

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *    the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *    current values of the pp bits in the HPTE prevent access
	 *    because we are doing software DIRTY bit management and the
	 *    page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;

		/* Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

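	/*
	 * We now effectively own the PTE: H_PAGE_BUSY is set, so any
	 * concurrent hash fault on this page returns early above and
	 * retries the access until we clear the busy bit below.
	 */
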
	/* Make sure this is a hugetlb entry */
	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
		return 0;

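	/*
	 * Convert the Linux PTE bits to HPTE permission bits and build the
	 * "real" PTE. With a 64K base page size the HPTE slot number does
	 * not fit in the PTE itself; it is stored a full table of entries
	 * past it, hence the PTRS_PER_PUD offset for 16G pages and
	 * PTRS_PER_PMD for the rest.
	 */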
	rflags = htab_convert_pte_flags(new_pte);
	if (unlikely(mmu_psize == MMU_PAGE_16G))
		offset = PTRS_PER_PUD;
	else
		offset = PTRS_PER_PMD;
	rpte = __real_pte(__pte(old_pte), ptep, offset);

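	/*
	 * Lazy icache flushing: the page is initially mapped no-execute
	 * and only flushed (by hash_page_do_lazy_icache()) once it takes
	 * an instruction fault.
	 */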
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long gslot;

		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0);
		if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, mmu_psize,
					       mmu_psize, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

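	/*
	 * If updatepp() did not find the HPTE (it may have been evicted
	 * in the meantime), _PAGE_HPTEFLAGS was cleared above so that we
	 * fall into the insertion path below, just like case 1.
	 */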
	if (likely(!(old_pte & H_PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;

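		/*
		 * hpte_insert_repeating() tries the primary and then the
		 * secondary hash group, evicting an existing entry and
		 * retrying if both groups are full.
		 */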
		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
					     mmu_psize, ssize);

		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, mmu_psize, old_pte);
			return -1;
		}

		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}

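/*
 * huge_ptep_modify_prot_start()/huge_ptep_modify_prot_commit() bracket a
 * hugetlb protection change: start transiently marks the PTE invalid
 * (while keeping it pte_present()) so the hash fault path above cannot
 * race with the update, and commit installs the new PTE value.
 */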
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	unsigned long pte_val;
	/*
	 * Clear the _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep the pte_present true so that we don't take
	 * a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep,
			     _PAGE_PRESENT, _PAGE_INVALID, 1);

	return __pte(pte_val);
}

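/*
 * Radix provides its own commit hook (it may need to flush the old
 * translation before installing the new PTE). For hash, the old HPTE
 * was already invalidated by the pte_update() in
 * huge_ptep_modify_prot_start(), so a plain set_huge_pte_at() suffices.
 */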
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
				  pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
							   old_pte, pte);
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}