/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)

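/*
 * For illustration (not a definition): each *_INDEX_SIZE is the log2 of
 * the number of entries at that page-table level, so the sum of the four
 * index sizes plus PAGE_SHIFT is the number of effective-address bits the
 * page tables can translate, and H_PGTABLE_RANGE is 2 to that power.
 */
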
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Only with hash 64K do we need to use the second half of the PMD page
 * table to store a pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif

/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000400000000000) /* 64T */

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	ASM_CONST(0x380000000000) /* 56T */
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START	H_VMALLOC_END

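/*
 * Note: with the sizes above, vmalloc takes 56T of the 64T kernel virtual
 * region, leaving the remaining 8T from H_KERN_IO_START upward for the
 * IO (ioremap) mappings.
 */
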
/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

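/*
 * Worked example: H_VMALLOC_START is 0xD000000000000000, so
 * REGION_ID(H_VMALLOC_START) == 0xD; with the usual 64-bit PAGE_OFFSET
 * of 0xC000000000000000, KERNEL_REGION_ID == 0xC.
 */
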
/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
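/* i.e. 0xF000000000000000: the vmemmap region occupies the top nibble 0xF */
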
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
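/*
 * These two masks decode the hash-slot information cached in a PTE: bit 3
 * says whether the HPTE went to the secondary hash bucket, and the low
 * three bits give its slot index within the 8-entry PTE group.
 */
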
#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);

/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep,
					     unsigned long clr,
					     unsigned long set, int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
	return old;
}

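/*
 * Usage sketch (illustrative, not a caller defined in this file): a
 * get-and-clear operation clears every bit and sets none, e.g.
 *
 *	old = hash__pte_update(mm, addr, ptep, ~0UL, 0, 0);
 *
 * and can then inspect bits such as _PAGE_DIRTY in the returned
 * pre-update value.
 */
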
/* Set the dirty and/or accessed bits atomically in a linux PTE, this
 * function doesn't need to flush the hash entry
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

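/*
 * Masking with H_PTE_NONE_MASK (_PAGE_HPTEFLAGS) means a PTE that carries
 * only leftover hash bookkeeping bits still compares as none.
 */
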
unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/* This low level function performs the actual PTE insertion
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}

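/*
 * Note: the percpu argument is unused in this variant; it is kept so the
 * hash and non-hash set_pte_at() helpers share one call signature.
 */
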
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */