/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
#include <asm/asm-405.h>

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
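
/*
 * Worked example (illustrative, assuming 4k pages): with 32-bit PTEs,
 * PTE_SHIFT is 10, so PGDIR_SHIFT = 12 + 10 = 22, each pgd entry maps
 * a 4MB region, and both levels hold 1 << 10 = 1024 entries.  With
 * 64-bit PTEs, PTE_SHIFT is 9, so PGDIR_SHIFT = 21, the second level
 * holds 1 << 9 = 512 entries and the first level 1 << 11 = 2048
 * entries in an 8KB pgdir, matching the layout described above.
 */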

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on other configurations; below it we can start laying
 * out the kernel virtual space that sits under PKMAP and FIXMAP.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts out at IOREMAP_TOP.  Early ioremaps move down from
 * there, until mem_init(), at which point ioremap_bot becomes the top of
 * the vmalloc and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a hole of up to 16MB
 * after the physical memory until the kernel virtual memory starts.
 * That means that any out-of-bounds memory accesses will hopefully be
 * caught.  The vmalloc() routines leave a hole of 4kB between each
 * vmalloced area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to
 * worry about clashes between our early calls to ioremap() (which start
 * growing down from IOREMAP_TOP) and the VM area allocations (which grow
 * upwards from VMALLOC_START).  For this reason we keep ioremap_bot, so
 * the VM system can check when it would run into the mappings set up
 * during early boot.  This really does become a problem for machines
 * with good amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
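
/*
 * Worked example (illustrative, with a hypothetical high_memory of
 * 0xd0000000 and no PPC_PIN_SIZE):
 *
 *	VMALLOC_START = (0xd0000000 + 0x1000000) & ~0xffffff = 0xd1000000
 *	VMALLOC_END   = ioremap_bot, which starts at IOREMAP_TOP and only
 *	moves down while early ioremaps are still being allocated.
 */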

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~0, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bits wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
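
/*
 * The PTE accessors below (test-and-clear young, get-and-clear,
 * wrprotect, set_access_flags) are all thin wrappers around
 * pte_update(); e.g. ptep_get_and_clear() is simply
 * pte_update(ptep, ~0, 0), which atomically fetches the old PTE
 * value and zeroes the entry in one step.
 */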

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
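
/*
 * Usage sketch (illustrative only): page aging reads and clears the
 * referenced bit in a single atomic step, e.g.
 *
 *	int young = ptep_test_and_clear_young(vma, addr, ptep);
 */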

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);

	pte_update(ptep, clr, set);

	flush_tlb_page(vma, address);
}
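
/*
 * Note that __ptep_set_access_flags() only ever relaxes protection: it
 * sets the dirty/accessed/write/exec bits present in @entry and clears
 * _PAGE_RO/_PAGE_NA only where @entry does not carry them; it never
 * removes access rights from the PTE.
 */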

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
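
/*
 * Walk sketch (illustrative only; the pud and pmd levels are folded,
 * so the intermediate offsets are effectively no-ops):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */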

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
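
/*
 * Worked example (illustrative): type 2 with offset 0x123 encodes as
 * entry.val = 2 | (0x123 << 5) = 0x2462, giving a PTE value of
 * 0x2462 << 3 = 0x12310; the left shift by 3 keeps the low three PTE
 * bits free of swap state.
 */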

int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */