/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif
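
/*
 * Illustrative use (assumed call site, not part of this header): platform
 * code that maps the kernel picks this protection for the text range,
 * roughly along the lines of
 *
 *	pgprot_t prot = in_kernel_text(va) ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
 *	map_kernel_page(va, pa, prot);
 *
 * where in_kernel_text() stands in for whatever text-range check the
 * platform uses; the exact call sites differ per platform.
 */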
/* Make module code happy; we don't set it read-only yet. */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)

#ifndef __ASSEMBLY__

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
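
/*
 * Usage sketch (illustrative only): set_ptes() installs @nr contiguous
 * PTEs starting at @ptep, mapping @nr consecutive pages. The generic
 * single-page helper expands to the nr == 1 case:
 *
 *	set_ptes(mm, addr, ptep, mk_pte(page, prot), 1);  // == set_pte_at()
 */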
#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif
/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}
/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
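
/*
 * Illustrative identity (assuming a well-formed PTE): splitting a PTE
 * with pte_pfn()/pte_pgprot() and recombining with pfn_pte() round-trips,
 * since PTE_RPN_MASK cleanly separates the pfn bits from the flag bits:
 *
 *	pte_t copy = pfn_pte(pte_pfn(pte), pte_pgprot(pte));	// == pte
 */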
#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];
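
/*
 * Note (generic-mm convention, not defined in this header): the 16 entries
 * of protection_map are indexed by the low vm_flags protection bits, i.e.
 * vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED), so for example
 * protection_map[VM_READ] is the pgprot for a private read-only mapping.
 */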
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif
/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif
/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else

#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
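
/*
 * Sketch of the layout this implies (assuming PTE_FRAG_NR > 1): a page
 * table page is carved into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes,
 * so PTE_FRAG_NR * PTE_FRAG_SIZE == PAGE_SIZE and several page tables can
 * share one backing page. The fallback above keeps that invariant
 * trivially, with a single whole-page "fragment" and no cached pointer.
 */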
#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif
#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory details some of
 * the restrictions. We don't check for PMD_SIZE because our
 * vmemmap allocation code can fall back correctly. The pageblock
 * alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, we can align
	 * things better for memory block sizes starting from
	 * 128MB. Hence align things to PMD_SIZE.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}
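
/*
 * Worked example (assuming sizeof(struct page) == 64): a 128MB memory
 * block with 4K pages has 128M / 4K == 32768 struct pages, so
 * vmemmap_size == 32768 * 64 == 2MB, which is exactly PMD_SIZE-aligned
 * and can be mapped with a single 2M vmemmap page on radix.
 */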
#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */