| 1 | /* |
| 2 | * This file contains the routines for TLB flushing. |
| 3 | * On machines where the MMU does not use a hash table to store virtual to |
 * physical translations (i.e. SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603, however, which shares the implementation
 * with hash based processors)
| 7 | * |
| 8 | * -- BenH |
| 9 | * |
| 10 | * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org> |
| 11 | * IBM Corp. |
| 12 | * |
| 13 | * Derived from arch/ppc/mm/init.c: |
| 14 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| 15 | * |
| 16 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) |
| 17 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) |
| 18 | * Copyright (C) 1996 Paul Mackerras |
| 19 | * |
| 20 | * Derived from "arch/i386/mm/init.c" |
| 21 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds |
| 22 | * |
| 23 | * This program is free software; you can redistribute it and/or |
| 24 | * modify it under the terms of the GNU General Public License |
| 25 | * as published by the Free Software Foundation; either version |
| 26 | * 2 of the License, or (at your option) any later version. |
| 27 | * |
| 28 | */ |
| 29 | |
| 30 | #include <linux/kernel.h> |
| 31 | #include <linux/export.h> |
| 32 | #include <linux/mm.h> |
| 33 | #include <linux/init.h> |
| 34 | #include <linux/highmem.h> |
| 35 | #include <linux/pagemap.h> |
| 36 | #include <linux/preempt.h> |
| 37 | #include <linux/spinlock.h> |
| 38 | #include <linux/memblock.h> |
| 39 | #include <linux/of_fdt.h> |
| 40 | #include <linux/hugetlb.h> |
| 41 | |
| 42 | #include <asm/tlbflush.h> |
| 43 | #include <asm/tlb.h> |
| 44 | #include <asm/code-patching.h> |
| 45 | #include <asm/cputhreads.h> |
| 46 | #include <asm/hugetlb.h> |
| 47 | #include <asm/paca.h> |
| 48 | |
| 49 | #include "mmu_decl.h" |
| 50 | |
| 51 | /* |
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries; .shift is log2 of the page size in bytes.
| 55 | */ |
| 56 | #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) |
| 57 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 58 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { |
| 59 | [MMU_PAGE_4K] = { |
| 60 | .shift = 12, |
| 61 | .enc = BOOK3E_PAGESZ_4K, |
| 62 | }, |
| 63 | [MMU_PAGE_2M] = { |
| 64 | .shift = 21, |
| 65 | .enc = BOOK3E_PAGESZ_2M, |
| 66 | }, |
| 67 | [MMU_PAGE_4M] = { |
| 68 | .shift = 22, |
| 69 | .enc = BOOK3E_PAGESZ_4M, |
| 70 | }, |
| 71 | [MMU_PAGE_16M] = { |
| 72 | .shift = 24, |
| 73 | .enc = BOOK3E_PAGESZ_16M, |
| 74 | }, |
| 75 | [MMU_PAGE_64M] = { |
| 76 | .shift = 26, |
| 77 | .enc = BOOK3E_PAGESZ_64M, |
| 78 | }, |
| 79 | [MMU_PAGE_256M] = { |
| 80 | .shift = 28, |
| 81 | .enc = BOOK3E_PAGESZ_256M, |
| 82 | }, |
| 83 | [MMU_PAGE_1G] = { |
| 84 | .shift = 30, |
| 85 | .enc = BOOK3E_PAGESZ_1GB, |
| 86 | }, |
| 87 | }; |
| 88 | #elif defined(CONFIG_PPC_8xx) |
| 89 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { |
| 90 | /* we only manage 4k and 16k pages as normal pages */ |
| 91 | #ifdef CONFIG_PPC_4K_PAGES |
| 92 | [MMU_PAGE_4K] = { |
| 93 | .shift = 12, |
| 94 | }, |
| 95 | #else |
| 96 | [MMU_PAGE_16K] = { |
| 97 | .shift = 14, |
| 98 | }, |
| 99 | #endif |
| 100 | [MMU_PAGE_512K] = { |
| 101 | .shift = 19, |
| 102 | }, |
| 103 | [MMU_PAGE_8M] = { |
| 104 | .shift = 23, |
| 105 | }, |
| 106 | }; |
| 107 | #else |
| 108 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { |
| 109 | [MMU_PAGE_4K] = { |
| 110 | .shift = 12, |
| 111 | .ind = 20, |
| 112 | .enc = BOOK3E_PAGESZ_4K, |
| 113 | }, |
| 114 | [MMU_PAGE_16K] = { |
| 115 | .shift = 14, |
| 116 | .enc = BOOK3E_PAGESZ_16K, |
| 117 | }, |
| 118 | [MMU_PAGE_64K] = { |
| 119 | .shift = 16, |
| 120 | .ind = 28, |
| 121 | .enc = BOOK3E_PAGESZ_64K, |
| 122 | }, |
| 123 | [MMU_PAGE_1M] = { |
| 124 | .shift = 20, |
| 125 | .enc = BOOK3E_PAGESZ_1M, |
| 126 | }, |
| 127 | [MMU_PAGE_16M] = { |
| 128 | .shift = 24, |
| 129 | .ind = 36, |
| 130 | .enc = BOOK3E_PAGESZ_16M, |
| 131 | }, |
| 132 | [MMU_PAGE_256M] = { |
| 133 | .shift = 28, |
| 134 | .enc = BOOK3E_PAGESZ_256M, |
| 135 | }, |
| 136 | [MMU_PAGE_1G] = { |
| 137 | .shift = 30, |
| 138 | .enc = BOOK3E_PAGESZ_1GB, |
| 139 | }, |
| 140 | }; |
#endif /* CONFIG_PPC_FSL_BOOK3E */
| 142 | |
| 143 | static inline int mmu_get_tsize(int psize) |
| 144 | { |
| 145 | return mmu_psize_defs[psize].enc; |
| 146 | } |
| 147 | #else |
| 148 | static inline int mmu_get_tsize(int psize) |
| 149 | { |
| 150 | /* This isn't used on !Book3E for now */ |
| 151 | return 0; |
| 152 | } |
#endif /* CONFIG_PPC_BOOK3E_MMU || CONFIG_PPC_8xx */
| 154 | |
| 155 | /* The variables below are currently only used on 64-bit Book3E |
| 156 | * though this will probably be made common with other nohash |
| 157 | * implementations at some point |
| 158 | */ |
| 159 | #ifdef CONFIG_PPC64 |
| 160 | |
| 161 | int mmu_linear_psize; /* Page size used for the linear mapping */ |
| 162 | int mmu_pte_psize; /* Page size used for PTE pages */ |
| 163 | int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ |
| 164 | int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ |
| 165 | unsigned long linear_map_top; /* Top of linear mapping */ |
| 166 | |
| 167 | |
| 168 | /* |
| 169 | * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug |
| 170 | * exceptions. This is used for bolted and e6500 TLB miss handlers which |
| 171 | * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, |
| 172 | * this is set to zero. |
| 173 | */ |
| 174 | int extlb_level_exc; |
| 175 | |
| 176 | #endif /* CONFIG_PPC64 */ |
| 177 | |
| 178 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 179 | /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ |
| 180 | DEFINE_PER_CPU(int, next_tlbcam_idx); |
| 181 | EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx); |
| 182 | #endif |
| 183 | |
| 184 | /* |
| 185 | * Base TLB flushing operations: |
| 186 | * |
| 187 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's |
| 188 | * - flush_tlb_page(vma, vmaddr) flushes one page |
| 189 | * - flush_tlb_range(vma, start, end) flushes a range of pages |
| 190 | * - flush_tlb_kernel_range(start, end) flushes kernel pages |
| 191 | * |
| 192 | * - local_* variants of page and mm only apply to the current |
| 193 | * processor |
| 194 | */ |
| 195 | |
| 196 | /* |
| 197 | * These are the base non-SMP variants of page and mm flushing |
| 198 | */ |
| 199 | void local_flush_tlb_mm(struct mm_struct *mm) |
| 200 | { |
| 201 | unsigned int pid; |
| 202 | |
| 203 | preempt_disable(); |
| 204 | pid = mm->context.id; |
| 205 | if (pid != MMU_NO_CONTEXT) |
| 206 | _tlbil_pid(pid); |
| 207 | preempt_enable(); |
| 208 | } |
| 209 | EXPORT_SYMBOL(local_flush_tlb_mm); |
| 210 | |
| 211 | void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 212 | int tsize, int ind) |
| 213 | { |
| 214 | unsigned int pid; |
| 215 | |
| 216 | preempt_disable(); |
| 217 | pid = mm ? mm->context.id : 0; |
| 218 | if (pid != MMU_NO_CONTEXT) |
| 219 | _tlbil_va(vmaddr, pid, tsize, ind); |
| 220 | preempt_enable(); |
| 221 | } |
| 222 | |
| 223 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) |
| 224 | { |
| 225 | __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, |
| 226 | mmu_get_tsize(mmu_virtual_psize), 0); |
| 227 | } |
| 228 | EXPORT_SYMBOL(local_flush_tlb_page); |
| 229 | |
| 230 | /* |
| 231 | * And here are the SMP non-local implementations |
| 232 | */ |
| 233 | #ifdef CONFIG_SMP |
| 234 | |
| 235 | static DEFINE_RAW_SPINLOCK(tlbivax_lock); |
| 236 | |
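/* Argument block passed to the TLB flush IPI handlers below */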
| 237 | struct tlb_flush_param { |
| 238 | unsigned long addr; |
| 239 | unsigned int pid; |
| 240 | unsigned int tsize; |
| 241 | unsigned int ind; |
| 242 | }; |
| 243 | |
| 244 | static void do_flush_tlb_mm_ipi(void *param) |
| 245 | { |
| 246 | struct tlb_flush_param *p = param; |
| 247 | |
| 248 | _tlbil_pid(p ? p->pid : 0); |
| 249 | } |
| 250 | |
| 251 | static void do_flush_tlb_page_ipi(void *param) |
| 252 | { |
| 253 | struct tlb_flush_param *p = param; |
| 254 | |
| 255 | _tlbil_va(p->addr, p->pid, p->tsize, p->ind); |
| 256 | } |
| 257 | |
| 258 | |
| 259 | /* Note on invalidations and PID: |
| 260 | * |
| 261 | * We snapshot the PID with preempt disabled. At this point, it can still |
| 262 | * change either because: |
| 263 | * - our context is being stolen (PID -> NO_CONTEXT) on another CPU |
 * - we are invalidating some target that isn't currently running here
| 265 | * and is concurrently acquiring a new PID on another CPU |
| 266 | * - some other CPU is re-acquiring a lost PID for this mm |
| 267 | * etc... |
| 268 | * |
| 269 | * However, this shouldn't be a problem as we only guarantee |
| 270 | * invalidation of TLB entries present prior to this call, so we |
| 271 | * don't care about the PID changing, and invalidating a stale PID |
| 272 | * is generally harmless. |
| 273 | */ |
| 274 | |
| 275 | void flush_tlb_mm(struct mm_struct *mm) |
| 276 | { |
| 277 | unsigned int pid; |
| 278 | |
| 279 | preempt_disable(); |
| 280 | pid = mm->context.id; |
| 281 | if (unlikely(pid == MMU_NO_CONTEXT)) |
| 282 | goto no_context; |
| 283 | if (!mm_is_core_local(mm)) { |
| 284 | struct tlb_flush_param p = { .pid = pid }; |
| 285 | /* Ignores smp_processor_id() even if set. */ |
| 286 | smp_call_function_many(mm_cpumask(mm), |
| 287 | do_flush_tlb_mm_ipi, &p, 1); |
| 288 | } |
| 289 | _tlbil_pid(pid); |
| 290 | no_context: |
| 291 | preempt_enable(); |
| 292 | } |
| 293 | EXPORT_SYMBOL(flush_tlb_mm); |
| 294 | |
| 295 | void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 296 | int tsize, int ind) |
| 297 | { |
| 298 | struct cpumask *cpu_mask; |
| 299 | unsigned int pid; |
| 300 | |
| 301 | /* |
| 302 | * This function as well as __local_flush_tlb_page() must only be called |
| 303 | * for user contexts. |
| 304 | */ |
| 305 | if (WARN_ON(!mm)) |
| 306 | return; |
| 307 | |
| 308 | preempt_disable(); |
| 309 | pid = mm->context.id; |
| 310 | if (unlikely(pid == MMU_NO_CONTEXT)) |
| 311 | goto bail; |
| 312 | cpu_mask = mm_cpumask(mm); |
| 313 | if (!mm_is_core_local(mm)) { |
| 314 | /* If broadcast tlbivax is supported, use it */ |
| 315 | if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) { |
| 316 | int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL); |
| 317 | if (lock) |
| 318 | raw_spin_lock(&tlbivax_lock); |
| 319 | _tlbivax_bcast(vmaddr, pid, tsize, ind); |
| 320 | if (lock) |
| 321 | raw_spin_unlock(&tlbivax_lock); |
| 322 | goto bail; |
| 323 | } else { |
| 324 | struct tlb_flush_param p = { |
| 325 | .pid = pid, |
| 326 | .addr = vmaddr, |
| 327 | .tsize = tsize, |
| 328 | .ind = ind, |
| 329 | }; |
| 330 | /* Ignores smp_processor_id() even if set in cpu_mask */ |
| 331 | smp_call_function_many(cpu_mask, |
| 332 | do_flush_tlb_page_ipi, &p, 1); |
| 333 | } |
| 334 | } |
| 335 | _tlbil_va(vmaddr, pid, tsize, ind); |
| 336 | bail: |
| 337 | preempt_enable(); |
| 338 | } |
| 339 | |
| 340 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) |
| 341 | { |
| 342 | #ifdef CONFIG_HUGETLB_PAGE |
| 343 | if (vma && is_vm_hugetlb_page(vma)) |
| 344 | flush_hugetlb_page(vma, vmaddr); |
| 345 | #endif |
| 346 | |
| 347 | __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, |
| 348 | mmu_get_tsize(mmu_virtual_psize), 0); |
| 349 | } |
| 350 | EXPORT_SYMBOL(flush_tlb_page); |
| 351 | |
| 352 | #endif /* CONFIG_SMP */ |
| 353 | |
| 354 | #ifdef CONFIG_PPC_47x |
| 355 | void __init early_init_mmu_47x(void) |
| 356 | { |
| 357 | #ifdef CONFIG_SMP |
| 358 | unsigned long root = of_get_flat_dt_root(); |
| 359 | if (of_get_flat_dt_prop(root, "cooperative-partition", NULL)) |
| 360 | mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST); |
| 361 | #endif /* CONFIG_SMP */ |
| 362 | } |
| 363 | #endif /* CONFIG_PPC_47x */ |
| 364 | |
| 365 | /* |
| 366 | * Flush kernel TLB entries in the given range |
| 367 | */ |
| 368 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
| 369 | { |
| 370 | #ifdef CONFIG_SMP |
| 371 | preempt_disable(); |
| 372 | smp_call_function(do_flush_tlb_mm_ipi, NULL, 1); |
| 373 | _tlbil_pid(0); |
| 374 | preempt_enable(); |
| 375 | #else |
| 376 | _tlbil_pid(0); |
| 377 | #endif |
| 378 | } |
| 379 | EXPORT_SYMBOL(flush_tlb_kernel_range); |
| 380 | |
| 381 | /* |
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax instructions before a
 * tlbsync, but for now we keep it that way
| 386 | */ |
| 387 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
		     unsigned long end)
{
| 391 | if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK)) |
| 392 | flush_tlb_page(vma, start); |
| 393 | else |
| 394 | flush_tlb_mm(vma->vm_mm); |
| 395 | } |
| 396 | EXPORT_SYMBOL(flush_tlb_range); |
| 397 | |
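/*
 * Called at the end of an mmu_gather batch: we do no per-page batching
 * here, so just flush the whole mm.
 */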
| 398 | void tlb_flush(struct mmu_gather *tlb) |
| 399 | { |
| 400 | flush_tlb_mm(tlb->mm); |
| 401 | } |
| 402 | |
| 403 | /* |
| 404 | * Below are functions specific to the 64-bit variant of Book3E though that |
| 405 | * may change in the future |
| 406 | */ |
| 407 | |
| 408 | #ifdef CONFIG_PPC64 |
| 409 | |
| 410 | /* |
| 411 | * Handling of virtual linear page tables or indirect TLB entries |
| 412 | * flushing when PTE pages are freed |
| 413 | */ |
| 414 | void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) |
| 415 | { |
| 416 | int tsize = mmu_psize_defs[mmu_pte_psize].enc; |
| 417 | |
| 418 | if (book3e_htw_mode != PPC_HTW_NONE) { |
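		/*
		 * HW tablewalk: indirect TLB entries map the PTE pages
		 * themselves, so invalidate every indirect entry that
		 * could cover the PMD-sized range this page table mapped.
		 */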
| 419 | unsigned long start = address & PMD_MASK; |
| 420 | unsigned long end = address + PMD_SIZE; |
| 421 | unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; |
| 422 | |
		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
| 427 | while (start < end) { |
| 428 | __flush_tlb_page(tlb->mm, start, tsize, 1); |
| 429 | start += size; |
| 430 | } |
| 431 | } else { |
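		/*
		 * No HW tablewalk: flush the virtual linear page table
		 * entry covering this address. Keep the top nibble of
		 * the address as the region selector, set the VPTE
		 * region bit, and replace the low bits with the offset
		 * of the PTE within the virtual table, rounded down to
		 * a page boundary.
		 */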
| 432 | unsigned long rmask = 0xf000000000000000ul; |
| 433 | unsigned long rid = (address & rmask) | 0x1000000000000000ul; |
| 434 | unsigned long vpte = address & ~rmask; |
| 435 | |
| 436 | #ifdef CONFIG_PPC_64K_PAGES |
| 437 | vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful; |
| 438 | #else |
| 439 | vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; |
| 440 | #endif |
| 441 | vpte |= rid; |
| 442 | __flush_tlb_page(tlb->mm, vpte, tsize, 0); |
| 443 | } |
| 444 | } |
| 445 | |
| 446 | static void setup_page_sizes(void) |
| 447 | { |
| 448 | unsigned int tlb0cfg; |
| 449 | unsigned int tlb0ps; |
| 450 | unsigned int eptcfg; |
| 451 | int i, psize; |
| 452 | |
| 453 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 454 | unsigned int mmucfg = mfspr(SPRN_MMUCFG); |
| 455 | int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); |
| 456 | |
| 457 | if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { |
| 458 | unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); |
| 459 | unsigned int min_pg, max_pg; |
| 460 | |
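		/*
		 * On MAV 1.0, TLB1CFG MINSIZE/MAXSIZE give the range of
		 * supported direct page sizes, encoded as log4(size in KB),
		 * hence the (shift - 10) >> 1 conversion below.
		 */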
| 461 | min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; |
| 462 | max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; |
| 463 | |
| 464 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
| 465 | struct mmu_psize_def *def; |
| 466 | unsigned int shift; |
| 467 | |
| 468 | def = &mmu_psize_defs[psize]; |
| 469 | shift = def->shift; |
| 470 | |
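			/* Odd shifts cannot be expressed as 4^n KB */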
| 471 | if (shift == 0 || shift & 1) |
| 472 | continue; |
| 473 | |
| 474 | /* adjust to be in terms of 4^shift Kb */ |
| 475 | shift = (shift - 10) >> 1; |
| 476 | |
| 477 | if ((shift >= min_pg) && (shift <= max_pg)) |
| 478 | def->flags |= MMU_PAGE_SIZE_DIRECT; |
| 479 | } |
| 480 | |
| 481 | goto out; |
| 482 | } |
| 483 | |
| 484 | if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { |
| 485 | u32 tlb1cfg, tlb1ps; |
| 486 | |
| 487 | tlb0cfg = mfspr(SPRN_TLB0CFG); |
| 488 | tlb1cfg = mfspr(SPRN_TLB1CFG); |
| 489 | tlb1ps = mfspr(SPRN_TLB1PS); |
| 490 | eptcfg = mfspr(SPRN_EPTCFG); |
| 491 | |
| 492 | if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) |
| 493 | book3e_htw_mode = PPC_HTW_E6500; |
| 494 | |
| 495 | /* |
| 496 | * We expect 4K subpage size and unrestricted indirect size. |
| 497 | * The lack of a restriction on indirect size is a Freescale |
| 498 | * extension, indicated by PSn = 0 but SPSn != 0. |
| 499 | */ |
| 500 | if (eptcfg != 2) |
| 501 | book3e_htw_mode = PPC_HTW_NONE; |
| 502 | |
| 503 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
| 504 | struct mmu_psize_def *def = &mmu_psize_defs[psize]; |
| 505 | |
| 506 | if (!def->shift) |
| 507 | continue; |
| 508 | |
| 509 | if (tlb1ps & (1U << (def->shift - 10))) { |
| 510 | def->flags |= MMU_PAGE_SIZE_DIRECT; |
| 511 | |
| 512 | if (book3e_htw_mode && psize == MMU_PAGE_2M) |
| 513 | def->flags |= MMU_PAGE_SIZE_INDIRECT; |
| 514 | } |
| 515 | } |
| 516 | |
| 517 | goto out; |
| 518 | } |
| 519 | #endif |
| 520 | |
| 521 | tlb0cfg = mfspr(SPRN_TLB0CFG); |
| 522 | tlb0ps = mfspr(SPRN_TLB0PS); |
| 523 | eptcfg = mfspr(SPRN_EPTCFG); |
| 524 | |
| 525 | /* Look for supported direct sizes */ |
| 526 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
| 527 | struct mmu_psize_def *def = &mmu_psize_defs[psize]; |
| 528 | |
| 529 | if (tlb0ps & (1U << (def->shift - 10))) |
| 530 | def->flags |= MMU_PAGE_SIZE_DIRECT; |
| 531 | } |
| 532 | |
	/* Indirect page sizes supported? */
| 534 | if ((tlb0cfg & TLBnCFG_IND) == 0 || |
| 535 | (tlb0cfg & TLBnCFG_PT) == 0) |
| 536 | goto out; |
| 537 | |
| 538 | book3e_htw_mode = PPC_HTW_IBM; |
| 539 | |
| 540 | /* Now, we only deal with one IND page size for each |
| 541 | * direct size. Hopefully all implementations today are |
| 542 | * unambiguous, but we might want to be careful in the |
| 543 | * future. |
| 544 | */ |
| 545 | for (i = 0; i < 3; i++) { |
| 546 | unsigned int ps, sps; |
| 547 | |
| 548 | sps = eptcfg & 0x1f; |
| 549 | eptcfg >>= 5; |
| 550 | ps = eptcfg & 0x1f; |
| 551 | eptcfg >>= 5; |
| 552 | if (!ps || !sps) |
| 553 | continue; |
| 554 | for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { |
| 555 | struct mmu_psize_def *def = &mmu_psize_defs[psize]; |
| 556 | |
| 557 | if (ps == (def->shift - 10)) |
| 558 | def->flags |= MMU_PAGE_SIZE_INDIRECT; |
| 559 | if (sps == (def->shift - 10)) |
| 560 | def->ind = ps + 10; |
| 561 | } |
| 562 | } |
| 563 | |
| 564 | out: |
| 565 | /* Cleanup array and print summary */ |
| 566 | pr_info("MMU: Supported page sizes\n"); |
| 567 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
| 568 | struct mmu_psize_def *def = &mmu_psize_defs[psize]; |
		static const char * const __page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
| 575 | if (def->flags == 0) { |
| 576 | def->shift = 0; |
| 577 | continue; |
| 578 | } |
| 579 | pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), |
| 580 | __page_type_names[def->flags & 0x3]); |
| 581 | } |
| 582 | } |
| 583 | |
| 584 | static void setup_mmu_htw(void) |
| 585 | { |
| 586 | /* |
| 587 | * If we want to use HW tablewalk, enable it by patching the TLB miss |
| 588 | * handlers to branch to the one dedicated to it. |
| 589 | */ |
| 590 | |
| 591 | switch (book3e_htw_mode) { |
| 592 | case PPC_HTW_IBM: |
| 593 | patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); |
| 594 | patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); |
| 595 | break; |
| 596 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 597 | case PPC_HTW_E6500: |
| 598 | extlb_level_exc = EX_TLB_SIZE; |
| 599 | patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); |
| 600 | patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); |
| 601 | break; |
| 602 | #endif |
| 603 | } |
| 604 | pr_info("MMU: Book3E HW tablewalk %s\n", |
| 605 | book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported"); |
| 606 | } |
| 607 | |
| 608 | /* |
| 609 | * Early initialization of the MMU TLB code |
| 610 | */ |
| 611 | static void early_init_this_mmu(void) |
| 612 | { |
| 613 | unsigned int mas4; |
| 614 | |
| 615 | /* Set MAS4 based on page table setting */ |
| 616 | |
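	/* A default WIMGE of 0x4 sets only the M (coherency) bit */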
| 617 | mas4 = 0x4 << MAS4_WIMGED_SHIFT; |
| 618 | switch (book3e_htw_mode) { |
| 619 | case PPC_HTW_E6500: |
| 620 | mas4 |= MAS4_INDD; |
| 621 | mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; |
| 622 | mas4 |= MAS4_TLBSELD(1); |
| 623 | mmu_pte_psize = MMU_PAGE_2M; |
| 624 | break; |
| 625 | |
| 626 | case PPC_HTW_IBM: |
| 627 | mas4 |= MAS4_INDD; |
| 628 | #ifdef CONFIG_PPC_64K_PAGES |
| 629 | mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT; |
| 630 | mmu_pte_psize = MMU_PAGE_256M; |
| 631 | #else |
| 632 | mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; |
| 633 | mmu_pte_psize = MMU_PAGE_1M; |
| 634 | #endif |
| 635 | break; |
| 636 | |
| 637 | case PPC_HTW_NONE: |
| 638 | #ifdef CONFIG_PPC_64K_PAGES |
| 639 | mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT; |
| 640 | #else |
| 641 | mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; |
| 642 | #endif |
| 643 | mmu_pte_psize = mmu_virtual_psize; |
| 644 | break; |
| 645 | } |
| 646 | mtspr(SPRN_MAS4, mas4); |
| 647 | |
| 648 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 649 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { |
| 650 | unsigned int num_cams; |
| 651 | int __maybe_unused cpu = smp_processor_id(); |
| 652 | bool map = true; |
| 653 | |
| 654 | /* use a quarter of the TLBCAM for bolted linear map */ |
| 655 | num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; |
| 656 | |
| 657 | /* |
| 658 | * Only do the mapping once per core, or else the |
| 659 | * transient mapping would cause problems. |
| 660 | */ |
| 661 | #ifdef CONFIG_SMP |
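		/*
		 * More than one thread enabled on this core means a
		 * sibling thread already created the mapping.
		 */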
| 662 | if (hweight32(get_tensr()) > 1) |
| 663 | map = false; |
| 664 | #endif |
| 665 | |
| 666 | if (map) |
| 667 | linear_map_top = map_mem_in_cams(linear_map_top, |
| 668 | num_cams, false); |
| 669 | } |
| 670 | #endif |
| 671 | |
| 672 | /* A sync won't hurt us after mucking around with |
| 673 | * the MMU configuration |
| 674 | */ |
| 675 | mb(); |
| 676 | } |
| 677 | |
| 678 | static void __init early_init_mmu_global(void) |
| 679 | { |
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard-wires it. Ideally
	 * we should determine a suitable page size and patch the
	 * TLB miss code (either that, or use the PACA to store
	 * the value we want)
	 */
| 686 | mmu_linear_psize = MMU_PAGE_1G; |
| 687 | |
| 688 | /* XXX This should be decided at runtime based on supported |
| 689 | * page sizes in the TLB, but for now let's assume 16M is |
| 690 | * always there and a good fit (which it probably is) |
| 691 | * |
| 692 | * Freescale booke only supports 4K pages in TLB0, so use that. |
| 693 | */ |
| 694 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) |
| 695 | mmu_vmemmap_psize = MMU_PAGE_4K; |
| 696 | else |
| 697 | mmu_vmemmap_psize = MMU_PAGE_16M; |
| 698 | |
| 699 | /* XXX This code only checks for TLB 0 capabilities and doesn't |
| 700 | * check what page size combos are supported by the HW. It |
| 701 | * also doesn't handle the case where a separate array holds |
| 702 | * the IND entries from the array loaded by the PT. |
| 703 | */ |
| 704 | /* Look for supported page sizes */ |
| 705 | setup_page_sizes(); |
| 706 | |
| 707 | /* Look for HW tablewalk support */ |
| 708 | setup_mmu_htw(); |
| 709 | |
| 710 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 711 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { |
| 712 | if (book3e_htw_mode == PPC_HTW_NONE) { |
| 713 | extlb_level_exc = EX_TLB_SIZE; |
| 714 | patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); |
| 715 | patch_exception(0x1e0, |
| 716 | exc_instruction_tlb_miss_bolted_book3e); |
| 717 | } |
| 718 | } |
| 719 | #endif |
| 720 | |
| 721 | /* Set the global containing the top of the linear mapping |
| 722 | * for use by the TLB miss code |
| 723 | */ |
| 724 | linear_map_top = memblock_end_of_DRAM(); |
| 725 | } |
| 726 | |
| 727 | static void __init early_mmu_set_memory_limit(void) |
| 728 | { |
| 729 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 730 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { |
| 731 | /* |
		 * Limit memory so we don't have linear faults.
| 733 | * Unlike memblock_set_current_limit, which limits |
| 734 | * memory available during early boot, this permanently |
| 735 | * reduces the memory available to Linux. We need to |
| 736 | * do this because highmem is not supported on 64-bit. |
| 737 | */ |
| 738 | memblock_enforce_memory_limit(linear_map_top); |
| 739 | } |
| 740 | #endif |
| 741 | |
| 742 | memblock_set_current_limit(linear_map_top); |
| 743 | } |
| 744 | |
| 745 | /* boot cpu only */ |
| 746 | void __init early_init_mmu(void) |
| 747 | { |
| 748 | early_init_mmu_global(); |
| 749 | early_init_this_mmu(); |
| 750 | early_mmu_set_memory_limit(); |
| 751 | } |
| 752 | |
| 753 | void early_init_mmu_secondary(void) |
| 754 | { |
| 755 | early_init_this_mmu(); |
| 756 | } |
| 757 | |
| 758 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, |
| 759 | phys_addr_t first_memblock_size) |
| 760 | { |
| 761 | /* On non-FSL Embedded 64-bit, we adjust the RMA size to match |
| 762 | * the bolted TLB entry. We know for now that only 1G |
| 763 | * entries are supported though that may eventually |
| 764 | * change. |
| 765 | * |
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
| 767 | * unusual memory sizes it's possible for some RAM to not be mapped |
| 768 | * (such RAM is not used at all by Linux, since we don't support |
| 769 | * highmem on 64-bit). We limit ppc64_rma_size to what would be |
| 770 | * mappable if this memblock is the only one. Additional memblocks |
| 771 | * can only increase, not decrease, the amount that ends up getting |
| 772 | * mapped. We still limit max to 1G even if we'll eventually map |
| 773 | * more. This is due to what the early init code is set up to do. |
| 774 | * |
| 775 | * We crop it to the size of the first MEMBLOCK to |
| 776 | * avoid going over total available memory just in case... |
| 777 | */ |
| 778 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 779 | if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { |
| 780 | unsigned long linear_sz; |
| 781 | unsigned int num_cams; |
| 782 | |
| 783 | /* use a quarter of the TLBCAM for bolted linear map */ |
| 784 | num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; |
| 785 | |
| 786 | linear_sz = map_mem_in_cams(first_memblock_size, num_cams, |
| 787 | true); |
| 788 | |
| 789 | ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); |
| 790 | } else |
| 791 | #endif |
| 792 | ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); |
| 793 | |
| 794 | /* Finally limit subsequent allocations */ |
| 795 | memblock_set_current_limit(first_memblock_base + ppc64_rma_size); |
| 796 | } |
| 797 | #else /* ! CONFIG_PPC64 */ |
| 798 | void __init early_init_mmu(void) |
| 799 | { |
| 800 | #ifdef CONFIG_PPC_47x |
| 801 | early_init_mmu_47x(); |
| 802 | #endif |
| 803 | } |
| 804 | #endif /* CONFIG_PPC64 */ |