Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * PowerPC64 port by Mike Corrigan and Dave Engebretsen | |
3 | * {mikejc|engebret}@us.ibm.com | |
4 | * | |
5 | * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> | |
6 | * | |
7 | * SMP scalability work: | |
8 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM | |
9 | * | |
10 | * Module name: htab.c | |
11 | * | |
12 | * Description: | |
13 | * PowerPC Hashed Page Table functions | |
14 | * | |
15 | * This program is free software; you can redistribute it and/or | |
16 | * modify it under the terms of the GNU General Public License | |
17 | * as published by the Free Software Foundation; either version | |
18 | * 2 of the License, or (at your option) any later version. | |
19 | */ | |
20 | ||
21 | #undef DEBUG | |
3c726f8d | 22 | #undef DEBUG_LOW |
1da177e4 | 23 | |
1da177e4 LT |
24 | #include <linux/spinlock.h> |
25 | #include <linux/errno.h> | |
26 | #include <linux/sched.h> | |
27 | #include <linux/proc_fs.h> | |
28 | #include <linux/stat.h> | |
29 | #include <linux/sysctl.h> | |
66b15db6 | 30 | #include <linux/export.h> |
1da177e4 LT |
31 | #include <linux/ctype.h> |
32 | #include <linux/cache.h> | |
33 | #include <linux/init.h> | |
34 | #include <linux/signal.h> | |
95f72d1e | 35 | #include <linux/memblock.h> |
ba12eede | 36 | #include <linux/context_tracking.h> |
1da177e4 | 37 | |
1da177e4 LT |
38 | #include <asm/processor.h> |
39 | #include <asm/pgtable.h> | |
40 | #include <asm/mmu.h> | |
41 | #include <asm/mmu_context.h> | |
42 | #include <asm/page.h> | |
43 | #include <asm/types.h> | |
1da177e4 LT |
44 | #include <asm/uaccess.h> |
45 | #include <asm/machdep.h> | |
d9b2b2a2 | 46 | #include <asm/prom.h> |
1da177e4 LT |
47 | #include <asm/tlbflush.h> |
48 | #include <asm/io.h> | |
49 | #include <asm/eeh.h> | |
50 | #include <asm/tlb.h> | |
51 | #include <asm/cacheflush.h> | |
52 | #include <asm/cputable.h> | |
1da177e4 | 53 | #include <asm/sections.h> |
be3ebfe8 | 54 | #include <asm/copro.h> |
aa39be09 | 55 | #include <asm/udbg.h> |
b68a70c4 | 56 | #include <asm/code-patching.h> |
3ccc00a7 | 57 | #include <asm/fadump.h> |
f5339277 | 58 | #include <asm/firmware.h> |
bc2a9408 | 59 | #include <asm/tm.h> |
cfcb3d80 | 60 | #include <asm/trace.h> |
1da177e4 LT |
61 | |
62 | #ifdef DEBUG | |
63 | #define DBG(fmt...) udbg_printf(fmt) | |
64 | #else | |
65 | #define DBG(fmt...) | |
66 | #endif | |
67 | ||
3c726f8d BH |
68 | #ifdef DEBUG_LOW |
69 | #define DBG_LOW(fmt...) udbg_printf(fmt) | |
70 | #else | |
71 | #define DBG_LOW(fmt...) | |
72 | #endif | |
73 | ||
74 | #define KB (1024) | |
75 | #define MB (1024*KB) | |
658013e9 | 76 | #define GB (1024L*MB) |
3c726f8d | 77 | |
1da177e4 LT |
78 | /* |
79 | * Note: pte --> Linux PTE | |
80 | * HPTE --> PowerPC Hashed Page Table Entry | |
81 | * | |
82 | * Execution context: | |
83 | * htab_initialize is called with the MMU off (of course), but | |
84 | * the kernel has been copied down to zero so it can directly | |
85 | * reference global data. At this point it is very difficult | |
86 | * to print debug info. | |
87 | * | |
88 | */ | |
89 | ||
90 | #ifdef CONFIG_U3_DART | |
91 | extern unsigned long dart_tablebase; | |
92 | #endif /* CONFIG_U3_DART */ | |
93 | ||
799d6046 PM |
94 | static unsigned long _SDR1; |
95 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; | |
e1802b06 | 96 | EXPORT_SYMBOL_GPL(mmu_psize_defs); |
799d6046 | 97 | |
8e561e7e | 98 | struct hash_pte *htab_address; |
337a7128 | 99 | unsigned long htab_size_bytes; |
96e28449 | 100 | unsigned long htab_hash_mask; |
4ab79aa8 | 101 | EXPORT_SYMBOL_GPL(htab_hash_mask); |
3c726f8d | 102 | int mmu_linear_psize = MMU_PAGE_4K; |
8ca7a82f | 103 | EXPORT_SYMBOL_GPL(mmu_linear_psize); |
3c726f8d | 104 | int mmu_virtual_psize = MMU_PAGE_4K; |
bf72aeba | 105 | int mmu_vmalloc_psize = MMU_PAGE_4K; |
cec08e7a BH |
106 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
107 | int mmu_vmemmap_psize = MMU_PAGE_4K; | |
108 | #endif | |
bf72aeba | 109 | int mmu_io_psize = MMU_PAGE_4K; |
1189be65 | 110 | int mmu_kernel_ssize = MMU_SEGSIZE_256M; |
8ca7a82f | 111 | EXPORT_SYMBOL_GPL(mmu_kernel_ssize); |
1189be65 | 112 | int mmu_highuser_ssize = MMU_SEGSIZE_256M; |
584f8b71 | 113 | u16 mmu_slb_size = 64; |
4ab79aa8 | 114 | EXPORT_SYMBOL_GPL(mmu_slb_size); |
bf72aeba PM |
115 | #ifdef CONFIG_PPC_64K_PAGES |
116 | int mmu_ci_restrictions; | |
117 | #endif | |
370a908d BH |
118 | #ifdef CONFIG_DEBUG_PAGEALLOC |
119 | static u8 *linear_map_hash_slots; | |
120 | static unsigned long linear_map_hash_count; | |
ed166692 | 121 | static DEFINE_SPINLOCK(linear_map_hash_lock); |
370a908d | 122 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
1da177e4 | 123 | |
3c726f8d BH |
124 | /* These are the definitions of the page size arrays to be used when none
125 | * is provided by the firmware. | |
126 | */ | |
1da177e4 | 127 | |
3c726f8d BH |
128 | /* Pre-POWER4 CPUs (4k pages only) |
129 | */ | |
09de9ff8 | 130 | static struct mmu_psize_def mmu_psize_defaults_old[] = { |
3c726f8d BH |
131 | [MMU_PAGE_4K] = { |
132 | .shift = 12, | |
133 | .sllp = 0, | |
b1022fbd | 134 | .penc = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1}, |
3c726f8d BH |
135 | .avpnm = 0, |
136 | .tlbiel = 0, | |
137 | }, | |
138 | }; | |
139 | ||
140 | /* POWER4, GPUL, POWER5 | |
141 | * | |
142 | * Support for 16Mb large pages | |
143 | */ | |
09de9ff8 | 144 | static struct mmu_psize_def mmu_psize_defaults_gp[] = { |
3c726f8d BH |
145 | [MMU_PAGE_4K] = { |
146 | .shift = 12, | |
147 | .sllp = 0, | |
b1022fbd | 148 | .penc = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1}, |
3c726f8d BH |
149 | .avpnm = 0, |
150 | .tlbiel = 1, | |
151 | }, | |
152 | [MMU_PAGE_16M] = { | |
153 | .shift = 24, | |
154 | .sllp = SLB_VSID_L, | |
b1022fbd AK |
155 | .penc = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0, |
156 | [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 }, | |
3c726f8d BH |
157 | .avpnm = 0x1UL, |
158 | .tlbiel = 0, | |
159 | }, | |
160 | }; | |
161 | ||
c6a3c495 | 162 | unsigned long htab_convert_pte_flags(unsigned long pteflags) |
bc033b63 | 163 | { |
c6a3c495 | 164 | unsigned long rflags = 0; |
bc033b63 BH |
165 | |
166 | /* _PAGE_EXEC -> NOEXEC */ | |
167 | if ((pteflags & _PAGE_EXEC) == 0) | |
168 | rflags |= HPTE_R_N; | |
c6a3c495 | 169 | /* |
e58e87ad | 170 | * PPP bits: |
1ec3f937 | 171 | * Linux uses slb key 0 for kernel and 1 for user. |
e58e87ad AK |
172 | * kernel RW areas are mapped with PPP=0b000 |
173 | * User area is mapped with PPP=0b010 for read/write | |
174 | * or PPP=0b011 for read-only (including writeable but clean pages). | |
bc033b63 | 175 | */ |
e58e87ad AK |
176 | if (pteflags & _PAGE_PRIVILEGED) { |
177 | /* | |
178 | * Kernel read only mapped with ppp bits 0b110 | |
179 | */ | |
180 | if (!(pteflags & _PAGE_WRITE)) | |
181 | rflags |= (HPTE_R_PP0 | 0x2); | |
182 | } else { | |
c7d54842 AK |
183 | if (pteflags & _PAGE_RWX) |
184 | rflags |= 0x2; | |
185 | if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY))) | |
c6a3c495 AK |
186 | rflags |= 0x1; |
187 | } | |
c8c06f5a AK |
188 | /* |
189 | * Always add "C" bit for perf. Memory coherence is always enabled | |
190 | */ | |
40e8550a AK |
191 | rflags |= HPTE_R_C | HPTE_R_M; |
192 | /* | |
193 | * Add in WIG bits | |
194 | */ | |
30bda41a AK |
195 | |
196 | if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) | |
40e8550a | 197 | rflags |= HPTE_R_I; |
30bda41a AK |
198 | if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT) |
199 | rflags |= (HPTE_R_I | HPTE_R_G); | |
200 | if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) | |
201 | rflags |= (HPTE_R_I | HPTE_R_W); | |
40e8550a AK |
202 | |
203 | return rflags; | |
bc033b63 | 204 | } |
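/*
 * For example, a normal kernel read/write mapping (_PAGE_PRIVILEGED and
 * _PAGE_WRITE set, _PAGE_EXEC clear) takes neither PP branch above and
 * ends up as HPTE_R_N | HPTE_R_C | HPTE_R_M, i.e. PPP=0b000, the
 * "kernel RW" encoding described in the comment above.
 */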
3c726f8d BH |
205 | |
206 | int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | |
bc033b63 | 207 | unsigned long pstart, unsigned long prot, |
1189be65 | 208 | int psize, int ssize) |
1da177e4 | 209 | { |
3c726f8d BH |
210 | unsigned long vaddr, paddr; |
211 | unsigned int step, shift; | |
3c726f8d | 212 | int ret = 0; |
1da177e4 | 213 | |
3c726f8d BH |
214 | shift = mmu_psize_defs[psize].shift; |
215 | step = 1 << shift; | |
1da177e4 | 216 | |
bc033b63 BH |
217 | prot = htab_convert_pte_flags(prot); |
218 | ||
219 | DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n", | |
220 | vstart, vend, pstart, prot, psize, ssize); | |
221 | ||
3c726f8d BH |
222 | for (vaddr = vstart, paddr = pstart; vaddr < vend; |
223 | vaddr += step, paddr += step) { | |
370a908d | 224 | unsigned long hash, hpteg; |
1189be65 | 225 | unsigned long vsid = get_kernel_vsid(vaddr, ssize); |
5524a27d | 226 | unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); |
9e88ba4e PM |
227 | unsigned long tprot = prot; |
228 | ||
c60ac569 AK |
229 | /* |
230 | * If we hit a bad address, return an error.
231 | */ | |
232 | if (!vsid) | |
233 | return -1; | |
9e88ba4e | 234 | /* Make kernel text executable */ |
549e8152 | 235 | if (overlaps_kernel_text(vaddr, vaddr + step)) |
9e88ba4e | 236 | tprot &= ~HPTE_R_N; |
1da177e4 | 237 | |
b18db0b8 AG |
238 | /* Make kvm guest trampolines executable */ |
239 | if (overlaps_kvm_tmp(vaddr, vaddr + step)) | |
240 | tprot &= ~HPTE_R_N; | |
241 | ||
429d2e83 MS |
242 | /* |
243 | * If relocatable, check if it overlaps interrupt vectors that | |
244 | * are copied down to real 0. For relocatable kernel | |
245 | * (e.g. kdump case) we copy interrupt vectors down to real | |
246 | * address 0. Mark that region as executable. This is | |
247 | * because on p8 system with relocation on exception feature | |
248 | * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence | |
249 | * in order to execute the interrupt handlers in virtual | |
250 | * mode the vector region need to be marked as executable. | |
251 | */ | |
252 | if ((PHYSICAL_START > MEMORY_START) && | |
253 | overlaps_interrupt_vector_text(vaddr, vaddr + step)) | |
254 | tprot &= ~HPTE_R_N; | |
255 | ||
5524a27d | 256 | hash = hpt_hash(vpn, shift, ssize); |
1da177e4 LT |
257 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); |
258 | ||
c30a4df3 | 259 | BUG_ON(!ppc_md.hpte_insert); |
5524a27d | 260 | ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot, |
b1022fbd | 261 | HPTE_V_BOLTED, psize, psize, ssize); |
c30a4df3 | 262 | |
3c726f8d BH |
263 | if (ret < 0) |
264 | break; | |
e7df0d88 | 265 | |
370a908d | 266 | #ifdef CONFIG_DEBUG_PAGEALLOC |
e7df0d88 JK |
267 | if (debug_pagealloc_enabled() && |
268 | (paddr >> PAGE_SHIFT) < linear_map_hash_count) | |
370a908d BH |
269 | linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; |
270 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | |
3c726f8d BH |
271 | } |
272 | return ret < 0 ? ret : 0; | |
273 | } | |
1da177e4 | 274 | |
ed5694a8 | 275 | int htab_remove_mapping(unsigned long vstart, unsigned long vend, |
f8c8803b BP |
276 | int psize, int ssize) |
277 | { | |
278 | unsigned long vaddr; | |
279 | unsigned int step, shift; | |
27828f98 DG |
280 | int rc; |
281 | int ret = 0; | |
f8c8803b BP |
282 | |
283 | shift = mmu_psize_defs[psize].shift; | |
284 | step = 1 << shift; | |
285 | ||
abd0a0e7 DG |
286 | if (!ppc_md.hpte_removebolted) |
287 | return -ENODEV; | |
f8c8803b | 288 | |
27828f98 DG |
289 | for (vaddr = vstart; vaddr < vend; vaddr += step) { |
290 | rc = ppc_md.hpte_removebolted(vaddr, psize, ssize); | |
291 | if (rc == -ENOENT) { | |
292 | ret = -ENOENT; | |
293 | continue; | |
294 | } | |
295 | if (rc < 0) | |
296 | return rc; | |
297 | } | |
52db9b44 | 298 | |
27828f98 | 299 | return ret; |
f8c8803b BP |
300 | } |
301 | ||
1189be65 PM |
302 | static int __init htab_dt_scan_seg_sizes(unsigned long node, |
303 | const char *uname, int depth, | |
304 | void *data) | |
305 | { | |
9d0c4dfe RH |
306 | const char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
307 | const __be32 *prop; | |
308 | int size = 0; | |
1189be65 PM |
309 | |
310 | /* We are scanning "cpu" nodes only */ | |
311 | if (type == NULL || strcmp(type, "cpu") != 0) | |
312 | return 0; | |
313 | ||
12f04f2b | 314 | prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size); |
1189be65 PM |
315 | if (prop == NULL) |
316 | return 0; | |
317 | for (; size >= 4; size -= 4, ++prop) { | |
12f04f2b | 318 | if (be32_to_cpu(prop[0]) == 40) { |
1189be65 | 319 | DBG("1T segment support detected\n"); |
44ae3ab3 | 320 | cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT; |
f5534004 | 321 | return 1; |
1189be65 | 322 | } |
1189be65 | 323 | } |
44ae3ab3 | 324 | cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; |
1189be65 PM |
325 | return 0; |
326 | } | |
327 | ||
328 | static void __init htab_init_seg_sizes(void) | |
329 | { | |
330 | of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL); | |
331 | } | |
332 | ||
b1022fbd AK |
333 | static int __init get_idx_from_shift(unsigned int shift) |
334 | { | |
335 | int idx = -1; | |
336 | ||
337 | switch (shift) { | |
338 | case 0xc: | |
339 | idx = MMU_PAGE_4K; | |
340 | break; | |
341 | case 0x10: | |
342 | idx = MMU_PAGE_64K; | |
343 | break; | |
344 | case 0x14: | |
345 | idx = MMU_PAGE_1M; | |
346 | break; | |
347 | case 0x18: | |
348 | idx = MMU_PAGE_16M; | |
349 | break; | |
350 | case 0x22: | |
351 | idx = MMU_PAGE_16G; | |
352 | break; | |
353 | } | |
354 | return idx; | |
355 | } | |
356 | ||
3c726f8d BH |
357 | static int __init htab_dt_scan_page_sizes(unsigned long node, |
358 | const char *uname, int depth, | |
359 | void *data) | |
360 | { | |
9d0c4dfe RH |
361 | const char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
362 | const __be32 *prop; | |
363 | int size = 0; | |
3c726f8d BH |
364 | |
365 | /* We are scanning "cpu" nodes only */ | |
366 | if (type == NULL || strcmp(type, "cpu") != 0) | |
367 | return 0; | |
368 | ||
12f04f2b | 369 | prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size); |
9e34992a ME |
370 | if (!prop) |
371 | return 0; | |
372 | ||
373 | pr_info("Page sizes from device-tree:\n"); | |
374 | size /= 4; | |
375 | cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); | |
376 | while(size > 0) { | |
377 | unsigned int base_shift = be32_to_cpu(prop[0]); | |
378 | unsigned int slbenc = be32_to_cpu(prop[1]); | |
379 | unsigned int lpnum = be32_to_cpu(prop[2]); | |
380 | struct mmu_psize_def *def; | |
381 | int idx, base_idx; | |
382 | ||
383 | size -= 3; prop += 3; | |
384 | base_idx = get_idx_from_shift(base_shift); | |
385 | if (base_idx < 0) { | |
386 | /* skip the pte encoding also */ | |
387 | prop += lpnum * 2; size -= lpnum * 2; | |
388 | continue; | |
389 | } | |
390 | def = &mmu_psize_defs[base_idx]; | |
391 | if (base_idx == MMU_PAGE_16M) | |
392 | cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; | |
393 | ||
394 | def->shift = base_shift; | |
395 | if (base_shift <= 23) | |
396 | def->avpnm = 0; | |
397 | else | |
398 | def->avpnm = (1 << (base_shift - 23)) - 1; | |
399 | def->sllp = slbenc; | |
400 | /* | |
401 | * We don't know for sure what's up with tlbiel, so | |
402 | * for now we only set it for 4K and 64K pages | |
403 | */ | |
404 | if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K) | |
405 | def->tlbiel = 1; | |
406 | else | |
407 | def->tlbiel = 0; | |
408 | ||
409 | while (size > 0 && lpnum) { | |
410 | unsigned int shift = be32_to_cpu(prop[0]); | |
411 | int penc = be32_to_cpu(prop[1]); | |
412 | ||
413 | prop += 2; size -= 2; | |
414 | lpnum--; | |
415 | ||
416 | idx = get_idx_from_shift(shift); | |
417 | if (idx < 0) | |
b1022fbd | 418 | continue; |
9e34992a ME |
419 | |
420 | if (penc == -1) | |
421 | pr_err("Invalid penc for base_shift=%d " | |
422 | "shift=%d\n", base_shift, shift); | |
423 | ||
424 | def->penc[idx] = penc; | |
425 | pr_info("base_shift=%d: shift=%d, sllp=0x%04lx," | |
426 | " avpnm=0x%08lx, tlbiel=%d, penc=%d\n", | |
427 | base_shift, shift, def->sllp, | |
428 | def->avpnm, def->tlbiel, def->penc[idx]); | |
1da177e4 | 429 | } |
3c726f8d | 430 | } |
9e34992a ME |
431 | |
432 | return 1; | |
3c726f8d BH |
433 | } |
434 | ||
e16a9c09 | 435 | #ifdef CONFIG_HUGETLB_PAGE |
658013e9 JT |
436 | /* Scan for 16G memory blocks that have been set aside for huge pages |
437 | * and reserve those blocks for 16G huge pages. | |
438 | */ | |
439 | static int __init htab_dt_scan_hugepage_blocks(unsigned long node, | |
440 | const char *uname, int depth, | |
441 | void *data) { | |
9d0c4dfe RH |
442 | const char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
443 | const __be64 *addr_prop; | |
444 | const __be32 *page_count_prop; | |
658013e9 JT |
445 | unsigned int expected_pages; |
446 | long unsigned int phys_addr; | |
447 | long unsigned int block_size; | |
448 | ||
449 | /* We are scanning "memory" nodes only */ | |
450 | if (type == NULL || strcmp(type, "memory") != 0) | |
451 | return 0; | |
452 | ||
453 | /* This property is the log base 2 of the number of virtual pages that | |
454 | * will represent this memory block. */ | |
455 | page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL); | |
456 | if (page_count_prop == NULL) | |
457 | return 0; | |
12f04f2b | 458 | expected_pages = (1 << be32_to_cpu(page_count_prop[0])); |
658013e9 JT |
459 | addr_prop = of_get_flat_dt_prop(node, "reg", NULL); |
460 | if (addr_prop == NULL) | |
461 | return 0; | |
12f04f2b AB |
462 | phys_addr = be64_to_cpu(addr_prop[0]); |
463 | block_size = be64_to_cpu(addr_prop[1]); | |
658013e9 JT |
464 | if (block_size != (16 * GB)) |
465 | return 0; | |
466 | printk(KERN_INFO "Huge page(16GB) memory: " | |
467 | "addr = 0x%lX size = 0x%lX pages = %d\n", | |
468 | phys_addr, block_size, expected_pages); | |
95f72d1e YL |
469 | if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) { |
470 | memblock_reserve(phys_addr, block_size * expected_pages); | |
4792adba JT |
471 | add_gpage(phys_addr, block_size, expected_pages); |
472 | } | |
658013e9 JT |
473 | return 0; |
474 | } | |
e16a9c09 | 475 | #endif /* CONFIG_HUGETLB_PAGE */ |
658013e9 | 476 | |
b1022fbd AK |
477 | static void mmu_psize_set_default_penc(void) |
478 | { | |
479 | int bpsize, apsize; | |
480 | for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) | |
481 | for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++) | |
482 | mmu_psize_defs[bpsize].penc[apsize] = -1; | |
483 | } | |
484 | ||
9048e648 AG |
485 | #ifdef CONFIG_PPC_64K_PAGES |
486 | ||
487 | static bool might_have_hea(void) | |
488 | { | |
489 | /* | |
490 | * The HEA ethernet adapter requires awareness of the | |
491 | * GX bus. Without that awareness we can easily assume | |
492 | * we will never see an HEA ethernet device. | |
493 | */ | |
494 | #ifdef CONFIG_IBMEBUS | |
495 | return !cpu_has_feature(CPU_FTR_ARCH_207S); | |
496 | #else | |
497 | return false; | |
498 | #endif | |
499 | } | |
500 | ||
501 | #endif /* #ifdef CONFIG_PPC_64K_PAGES */ | |
502 | ||
3c726f8d BH |
503 | static void __init htab_init_page_sizes(void) |
504 | { | |
505 | int rc; | |
506 | ||
b1022fbd AK |
507 | /* set the invalid penc to -1 */
508 | mmu_psize_set_default_penc(); | |
509 | ||
3c726f8d BH |
510 | /* Default to 4K pages only */ |
511 | memcpy(mmu_psize_defs, mmu_psize_defaults_old, | |
512 | sizeof(mmu_psize_defaults_old)); | |
513 | ||
514 | /* | |
515 | * Try to find the available page sizes in the device-tree | |
516 | */ | |
517 | rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL); | |
518 | if (rc != 0) /* Found */ | |
519 | goto found; | |
520 | ||
521 | /* | |
522 | * Not in the device-tree; let's fall back on the known size
523 | * list for 16M capable GP & GR
524 | */ | |
44ae3ab3 | 525 | if (mmu_has_feature(MMU_FTR_16M_PAGE)) |
3c726f8d BH |
526 | memcpy(mmu_psize_defs, mmu_psize_defaults_gp, |
527 | sizeof(mmu_psize_defaults_gp)); | |
e7df0d88 JK |
528 | found: |
529 | if (!debug_pagealloc_enabled()) { | |
530 | /* | |
531 | * Pick a size for the linear mapping. Currently, we only | |
532 | * support 16M, 1M and 4K which is the default | |
533 | */ | |
534 | if (mmu_psize_defs[MMU_PAGE_16M].shift) | |
535 | mmu_linear_psize = MMU_PAGE_16M; | |
536 | else if (mmu_psize_defs[MMU_PAGE_1M].shift) | |
537 | mmu_linear_psize = MMU_PAGE_1M; | |
538 | } | |
3c726f8d | 539 | |
bf72aeba | 540 | #ifdef CONFIG_PPC_64K_PAGES |
3c726f8d BH |
541 | /* |
542 | * Pick a size for the ordinary pages. Default is 4K, we support | |
bf72aeba PM |
543 | * 64K for user mappings and vmalloc if supported by the processor. |
544 | * We only use 64k for ioremap if the processor | |
545 | * (and firmware) support cache-inhibited large pages. | |
546 | * If not, we use 4k and set mmu_ci_restrictions so that | |
547 | * hash_page knows to switch processes that use cache-inhibited | |
548 | * mappings to 4k pages. | |
3c726f8d | 549 | */ |
bf72aeba | 550 | if (mmu_psize_defs[MMU_PAGE_64K].shift) { |
3c726f8d | 551 | mmu_virtual_psize = MMU_PAGE_64K; |
bf72aeba | 552 | mmu_vmalloc_psize = MMU_PAGE_64K; |
370a908d BH |
553 | if (mmu_linear_psize == MMU_PAGE_4K) |
554 | mmu_linear_psize = MMU_PAGE_64K; | |
44ae3ab3 | 555 | if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { |
cfe666b1 | 556 | /* |
9048e648 AG |
557 | * When running on pSeries using 64k pages for ioremap |
558 | * would stop us accessing the HEA ethernet. So if we | |
559 | * have the chance of ever seeing one, stay at 4k. | |
cfe666b1 | 560 | */ |
9048e648 | 561 | if (!might_have_hea() || !machine_is(pseries)) |
cfe666b1 PM |
562 | mmu_io_psize = MMU_PAGE_64K; |
563 | } else | |
bf72aeba PM |
564 | mmu_ci_restrictions = 1; |
565 | } | |
370a908d | 566 | #endif /* CONFIG_PPC_64K_PAGES */ |
3c726f8d | 567 | |
cec08e7a BH |
568 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
569 | /* We try to use 16M pages for vmemmap if that is supported | |
570 | * and we have at least 1G of RAM at boot | |
571 | */ | |
572 | if (mmu_psize_defs[MMU_PAGE_16M].shift && | |
95f72d1e | 573 | memblock_phys_mem_size() >= 0x40000000) |
cec08e7a BH |
574 | mmu_vmemmap_psize = MMU_PAGE_16M; |
575 | else if (mmu_psize_defs[MMU_PAGE_64K].shift) | |
576 | mmu_vmemmap_psize = MMU_PAGE_64K; | |
577 | else | |
578 | mmu_vmemmap_psize = MMU_PAGE_4K; | |
579 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | |
580 | ||
bf72aeba | 581 | printk(KERN_DEBUG "Page orders: linear mapping = %d, " |
cec08e7a BH |
582 | "virtual = %d, io = %d" |
583 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | |
584 | ", vmemmap = %d" | |
585 | #endif | |
586 | "\n", | |
3c726f8d | 587 | mmu_psize_defs[mmu_linear_psize].shift, |
bf72aeba | 588 | mmu_psize_defs[mmu_virtual_psize].shift, |
cec08e7a BH |
589 | mmu_psize_defs[mmu_io_psize].shift |
590 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | |
591 | ,mmu_psize_defs[mmu_vmemmap_psize].shift | |
592 | #endif | |
593 | ); | |
3c726f8d BH |
594 | |
595 | #ifdef CONFIG_HUGETLB_PAGE | |
658013e9 JT |
596 | /* Reserve 16G huge page memory sections for huge pages */ |
597 | of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); | |
3c726f8d BH |
598 | #endif /* CONFIG_HUGETLB_PAGE */ |
599 | } | |
600 | ||
601 | static int __init htab_dt_scan_pftsize(unsigned long node, | |
602 | const char *uname, int depth, | |
603 | void *data) | |
604 | { | |
9d0c4dfe RH |
605 | const char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
606 | const __be32 *prop; | |
3c726f8d BH |
607 | |
608 | /* We are scanning "cpu" nodes only */ | |
609 | if (type == NULL || strcmp(type, "cpu") != 0) | |
610 | return 0; | |
611 | ||
12f04f2b | 612 | prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL); |
3c726f8d BH |
613 | if (prop != NULL) { |
614 | /* pft_size[0] is the NUMA CEC cookie */ | |
12f04f2b | 615 | ppc64_pft_size = be32_to_cpu(prop[1]); |
3c726f8d | 616 | return 1; |
1da177e4 | 617 | } |
3c726f8d | 618 | return 0; |
1da177e4 LT |
619 | } |
620 | ||
5c3c7ede | 621 | unsigned htab_shift_for_mem_size(unsigned long mem_size) |
3eac8c69 | 622 | { |
5c3c7ede DG |
623 | unsigned memshift = __ilog2(mem_size); |
624 | unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift; | |
625 | unsigned pteg_shift; | |
626 | ||
627 | /* round mem_size up to next power of 2 */ | |
628 | if ((1UL << memshift) < mem_size) | |
629 | memshift += 1; | |
3eac8c69 | 630 | |
5c3c7ede DG |
631 | /* aim for 2 pages / pteg */ |
632 | pteg_shift = memshift - (pshift + 1); | |
3eac8c69 | 633 | |
5c3c7ede DG |
634 | /* |
635 | * 2^11 PTEGS of 128 bytes each, ie. 2^18 bytes is the minimum htab | |
636 | * size permitted by the architecture. | |
637 | */ | |
638 | return max(pteg_shift + 7, 18U); | |
639 | } | |
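/*
 * Worked example: with 8GB of RAM (memshift = 33) and a 64K base page
 * size (pshift = 16), pteg_shift = 33 - 17 = 16 and we return
 * max(16 + 7, 18) = 23, i.e. an 8MB hash table of 2^16 128-byte PTEGs,
 * giving the intended two pages per PTEG.
 */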
640 | ||
641 | static unsigned long __init htab_get_table_size(void) | |
642 | { | |
3c726f8d | 643 | /* If hash size isn't already provided by the platform, we try to |
943ffb58 | 644 | * retrieve it from the device-tree. If it's not there either, we
3c726f8d | 645 | * calculate it now based on the total RAM size |
3eac8c69 | 646 | */ |
3c726f8d BH |
647 | if (ppc64_pft_size == 0) |
648 | of_scan_flat_dt(htab_dt_scan_pftsize, NULL); | |
3eac8c69 PM |
649 | if (ppc64_pft_size) |
650 | return 1UL << ppc64_pft_size; | |
651 | ||
5c3c7ede | 652 | return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size()); |
3eac8c69 PM |
653 | } |
654 | ||
54b79248 | 655 | #ifdef CONFIG_MEMORY_HOTPLUG |
a1194097 | 656 | int create_section_mapping(unsigned long start, unsigned long end) |
54b79248 | 657 | { |
1dace6c6 DG |
658 | int rc = htab_bolt_mapping(start, end, __pa(start), |
659 | pgprot_val(PAGE_KERNEL), mmu_linear_psize, | |
660 | mmu_kernel_ssize); | |
661 | ||
662 | if (rc < 0) { | |
663 | int rc2 = htab_remove_mapping(start, end, mmu_linear_psize, | |
664 | mmu_kernel_ssize); | |
665 | BUG_ON(rc2 && (rc2 != -ENOENT)); | |
666 | } | |
667 | return rc; | |
54b79248 | 668 | } |
f8c8803b | 669 | |
52db9b44 | 670 | int remove_section_mapping(unsigned long start, unsigned long end) |
f8c8803b | 671 | { |
abd0a0e7 DG |
672 | int rc = htab_remove_mapping(start, end, mmu_linear_psize, |
673 | mmu_kernel_ssize); | |
674 | WARN_ON(rc < 0); | |
675 | return rc; | |
f8c8803b | 676 | } |
54b79248 MK |
677 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
678 | ||
50de596d AK |
679 | static void __init hash_init_partition_table(phys_addr_t hash_table, |
680 | unsigned long pteg_count) | |
681 | { | |
682 | unsigned long ps_field; | |
683 | unsigned long htab_size; | |
684 | unsigned long patb_size = 1UL << PATB_SIZE_SHIFT; | |
685 | ||
686 | /* | |
687 | * slb llp encoding for the page size used in VPM real mode. | |
688 | * We can ignore that for lpid 0 | |
689 | */ | |
690 | ps_field = 0; | |
691 | htab_size = __ilog2(pteg_count) - 11; | |
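/*
 * Same size encoding as SDR1's HTABSIZE field: log2 of the PTEG count
 * relative to the architected minimum of 2^11 PTEGs.
 */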
692 | ||
693 | BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large."); | |
694 | partition_tb = __va(memblock_alloc_base(patb_size, patb_size, | |
695 | MEMBLOCK_ALLOC_ANYWHERE)); | |
696 | ||
697 | /* Initialize the Partition Table with no entries */ | |
698 | memset((void *)partition_tb, 0, patb_size); | |
699 | partition_tb->patb0 = cpu_to_be64(ps_field | hash_table | htab_size); | |
700 | /* | |
701 | * FIXME!! This should be done via update_partition table | |
702 | * For now UPRT is 0 for us. | |
703 | */ | |
704 | partition_tb->patb1 = 0; | |
705 | DBG("Partition table %p\n", partition_tb); | |
706 | /* | |
707 | * update partition table control register, | |
708 | * 64 K size. | |
709 | */ | |
710 | mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); | |
711 | ||
712 | } | |
713 | ||
757c74d2 | 714 | static void __init htab_initialize(void) |
1da177e4 | 715 | { |
337a7128 | 716 | unsigned long table; |
1da177e4 | 717 | unsigned long pteg_count; |
9e88ba4e | 718 | unsigned long prot; |
41d824bf | 719 | unsigned long base = 0, size = 0, limit; |
28be7072 | 720 | struct memblock_region *reg; |
3c726f8d | 721 | |
1da177e4 LT |
722 | DBG(" -> htab_initialize()\n"); |
723 | ||
1189be65 PM |
724 | /* Initialize segment sizes */ |
725 | htab_init_seg_sizes(); | |
726 | ||
3c726f8d BH |
727 | /* Initialize page sizes */ |
728 | htab_init_page_sizes(); | |
729 | ||
44ae3ab3 | 730 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { |
1189be65 PM |
731 | mmu_kernel_ssize = MMU_SEGSIZE_1T; |
732 | mmu_highuser_ssize = MMU_SEGSIZE_1T; | |
733 | printk(KERN_INFO "Using 1TB segments\n"); | |
734 | } | |
735 | ||
1da177e4 LT |
736 | /* |
737 | * Calculate the required size of the htab. We want the number of | |
738 | * PTEGs to equal one half the number of real pages. | |
739 | */ | |
3c726f8d | 740 | htab_size_bytes = htab_get_table_size(); |
1da177e4 LT |
741 | pteg_count = htab_size_bytes >> 7; |
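/*
 * A PTEG (hash bucket) is HPTES_PER_GROUP (8) HPTEs of 16 bytes each,
 * i.e. 128 bytes, hence the shift by 7 to convert bytes to PTEGs.
 */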
742 | ||
1da177e4 LT |
743 | htab_hash_mask = pteg_count - 1; |
744 | ||
57cfb814 | 745 | if (firmware_has_feature(FW_FEATURE_LPAR)) { |
1da177e4 LT |
746 | /* Using a hypervisor which owns the htab */ |
747 | htab_address = NULL; | |
748 | _SDR1 = 0; | |
3ccc00a7 MS |
749 | #ifdef CONFIG_FA_DUMP |
750 | /* | |
751 | * If firmware-assisted dump is active, firmware preserves
752 | * the contents of the htab along with the entire partition memory.
753 | * Clear the htab if firmware-assisted dump is active so
754 | * that we don't end up using old mappings.
755 | */ | |
756 | if (is_fadump_active() && ppc_md.hpte_clear_all) | |
757 | ppc_md.hpte_clear_all(); | |
758 | #endif | |
1da177e4 LT |
759 | } else { |
760 | /* Find storage for the HPT. Must be contiguous in | |
41d824bf | 761 | * the absolute address space. On cell we want it to be |
31bf1119 | 762 | * in the first 2 Gig so we can use it for IOMMU hacks. |
1da177e4 | 763 | */ |
41d824bf | 764 | if (machine_is(cell)) |
31bf1119 | 765 | limit = 0x80000000; |
41d824bf | 766 | else |
27f574c2 | 767 | limit = MEMBLOCK_ALLOC_ANYWHERE; |
41d824bf | 768 | |
95f72d1e | 769 | table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit); |
1da177e4 LT |
770 | |
771 | DBG("Hash table allocated at %lx, size: %lx\n", table, | |
772 | htab_size_bytes); | |
773 | ||
70267a7f | 774 | htab_address = __va(table); |
1da177e4 LT |
775 | |
776 | /* htab absolute addr + encoded htabsize */ | |
777 | _SDR1 = table + __ilog2(pteg_count) - 11; | |
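/*
 * The low bits of SDR1 hold HTABSIZE = log2(pteg_count) - 11; the
 * architected minimum hash table is 2^11 PTEGs (256KB), and the table
 * is aligned to its own size (see the memblock_alloc_base() above), so
 * those low address bits are free to carry the size encoding.
 */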
778 | ||
779 | /* Initialize the HPT with no entries */ | |
780 | memset((void *)table, 0, htab_size_bytes); | |
799d6046 | 781 | |
50de596d AK |
782 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
783 | /* Set SDR1 */ | |
784 | mtspr(SPRN_SDR1, _SDR1); | |
785 | else | |
786 | hash_init_partition_table(table, pteg_count); | |
1da177e4 LT |
787 | } |
788 | ||
f5ea64dc | 789 | prot = pgprot_val(PAGE_KERNEL); |
1da177e4 | 790 | |
370a908d | 791 | #ifdef CONFIG_DEBUG_PAGEALLOC |
e7df0d88 JK |
792 | if (debug_pagealloc_enabled()) { |
793 | linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; | |
794 | linear_map_hash_slots = __va(memblock_alloc_base( | |
795 | linear_map_hash_count, 1, ppc64_rma_size)); | |
796 | memset(linear_map_hash_slots, 0, linear_map_hash_count); | |
797 | } | |
370a908d BH |
798 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
799 | ||
1da177e4 LT |
800 | /* On U3 based machines, we need to reserve the DART area and |
801 | * _NOT_ map it to avoid cache paradoxes as it's remapped non | |
802 | * cacheable later on | |
803 | */ | |
1da177e4 LT |
804 | |
805 | /* create the bolted linear mapping in the hash table */
28be7072 BH |
806 | for_each_memblock(memory, reg) { |
807 | base = (unsigned long)__va(reg->base); | |
808 | size = reg->size; | |
1da177e4 | 809 | |
5c339919 | 810 | DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", |
9e88ba4e | 811 | base, size, prot); |
1da177e4 LT |
812 | |
813 | #ifdef CONFIG_U3_DART | |
814 | /* Do not map the DART space. Fortunately, it will be aligned | |
95f72d1e | 815 | * in such a way that it will not cross two memblock regions and |
3c726f8d BH |
816 | * will fit within a single 16Mb page. |
817 | * The DART space is assumed to be a full 16Mb region even if | |
818 | * we only use 2Mb of that space. We will use more of it later | |
819 | * for AGP GART. We have to use a full 16Mb large page. | |
1da177e4 LT |
820 | */ |
821 | DBG("DART base: %lx\n", dart_tablebase); | |
822 | ||
823 | if (dart_tablebase != 0 && dart_tablebase >= base | |
824 | && dart_tablebase < (base + size)) { | |
caf80e57 | 825 | unsigned long dart_table_end = dart_tablebase + 16 * MB; |
1da177e4 | 826 | if (base != dart_tablebase) |
3c726f8d | 827 | BUG_ON(htab_bolt_mapping(base, dart_tablebase, |
9e88ba4e | 828 | __pa(base), prot, |
1189be65 PM |
829 | mmu_linear_psize, |
830 | mmu_kernel_ssize)); | |
caf80e57 | 831 | if ((base + size) > dart_table_end) |
3c726f8d | 832 | BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB, |
caf80e57 ME |
833 | base + size, |
834 | __pa(dart_table_end), | |
9e88ba4e | 835 | prot, |
1189be65 PM |
836 | mmu_linear_psize, |
837 | mmu_kernel_ssize)); | |
1da177e4 LT |
838 | continue; |
839 | } | |
840 | #endif /* CONFIG_U3_DART */ | |
caf80e57 | 841 | BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), |
9e88ba4e | 842 | prot, mmu_linear_psize, mmu_kernel_ssize)); |
e63075a3 BH |
843 | } |
844 | memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); | |
1da177e4 LT |
845 | |
846 | /* | |
847 | * If we have a memory_limit and we've allocated TCEs then we need to | |
848 | * explicitly map the TCE area at the top of RAM. We also cope with the | |
849 | * case that the TCEs start below memory_limit. | |
850 | * tce_alloc_start/end are 16MB aligned so the mapping should work | |
851 | * for either 4K or 16MB pages. | |
852 | */ | |
853 | if (tce_alloc_start) { | |
b5666f70 ME |
854 | tce_alloc_start = (unsigned long)__va(tce_alloc_start); |
855 | tce_alloc_end = (unsigned long)__va(tce_alloc_end); | |
1da177e4 LT |
856 | |
857 | if (base + size >= tce_alloc_start) | |
858 | tce_alloc_start = base + size + 1; | |
859 | ||
caf80e57 | 860 | BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end, |
bc033b63 | 861 | __pa(tce_alloc_start), prot, |
1189be65 | 862 | mmu_linear_psize, mmu_kernel_ssize)); |
1da177e4 LT |
863 | } |
864 | ||
7d0daae4 | 865 | |
1da177e4 LT |
866 | DBG(" <- htab_initialize()\n"); |
867 | } | |
868 | #undef KB | |
869 | #undef MB | |
1da177e4 | 870 | |
757c74d2 | 871 | void __init early_init_mmu(void) |
799d6046 | 872 | { |
dd1842a2 AK |
873 | /* |
874 | * initialize page table size | |
875 | */ | |
876 | __pte_index_size = H_PTE_INDEX_SIZE; | |
877 | __pmd_index_size = H_PMD_INDEX_SIZE; | |
878 | __pud_index_size = H_PUD_INDEX_SIZE; | |
879 | __pgd_index_size = H_PGD_INDEX_SIZE; | |
880 | __pmd_cache_index = H_PMD_CACHE_INDEX; | |
881 | __pte_table_size = H_PTE_TABLE_SIZE; | |
882 | __pmd_table_size = H_PMD_TABLE_SIZE; | |
883 | __pud_table_size = H_PUD_TABLE_SIZE; | |
884 | __pgd_table_size = H_PGD_TABLE_SIZE; | |
757c74d2 | 885 | /* Initialize the MMU Hash table and create the linear mapping |
376af594 ME |
886 | * of memory. Has to be done before SLB initialization as this is |
887 | * currently where the page size encoding is obtained. | |
757c74d2 BH |
888 | */ |
889 | htab_initialize(); | |
890 | ||
376af594 | 891 | /* Initialize SLB management */ |
13b3d13b | 892 | slb_initialize(); |
757c74d2 BH |
893 | } |
894 | ||
895 | #ifdef CONFIG_SMP | |
061d19f2 | 896 | void early_init_mmu_secondary(void) |
757c74d2 BH |
897 | { |
898 | /* Initialize hash table for that CPU */ | |
57cfb814 | 899 | if (!firmware_has_feature(FW_FEATURE_LPAR)) |
799d6046 | 900 | mtspr(SPRN_SDR1, _SDR1); |
757c74d2 | 901 | |
376af594 | 902 | /* Initialize SLB */ |
13b3d13b | 903 | slb_initialize(); |
799d6046 | 904 | } |
757c74d2 | 905 | #endif /* CONFIG_SMP */ |
799d6046 | 906 | |
1da177e4 LT |
907 | /* |
908 | * Called by asm hashtable.S for doing lazy icache flush | |
909 | */ | |
910 | unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) | |
911 | { | |
912 | struct page *page; | |
913 | ||
76c8e25b BH |
914 | if (!pfn_valid(pte_pfn(pte))) |
915 | return pp; | |
916 | ||
1da177e4 LT |
917 | page = pte_page(pte); |
918 | ||
919 | /* page is dirty */ | |
920 | if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) { | |
921 | if (trap == 0x400) { | |
0895ecda | 922 | flush_dcache_icache_page(page); |
1da177e4 LT |
923 | set_bit(PG_arch_1, &page->flags); |
924 | } else | |
3c726f8d | 925 | pp |= HPTE_R_N; |
1da177e4 LT |
926 | } |
927 | return pp; | |
928 | } | |
929 | ||
3a8247cc | 930 | #ifdef CONFIG_PPC_MM_SLICES |
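/*
 * Each address-space slice stores its page size as a 4-bit index into
 * mmu_psize_defs: the low (below 4GB) slice sizes are packed sixteen to
 * a u64 and the high slice sizes two per byte, hence the nibble
 * arithmetic below.
 */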
e51df2c1 | 931 | static unsigned int get_paca_psize(unsigned long addr) |
3a8247cc | 932 | { |
7aa0727f AK |
933 | u64 lpsizes; |
934 | unsigned char *hpsizes; | |
935 | unsigned long index, mask_index; | |
3a8247cc PM |
936 | |
937 | if (addr < SLICE_LOW_TOP) { | |
2fc251a8 | 938 | lpsizes = get_paca()->mm_ctx_low_slices_psize; |
3a8247cc | 939 | index = GET_LOW_SLICE_INDEX(addr); |
7aa0727f | 940 | return (lpsizes >> (index * 4)) & 0xF; |
3a8247cc | 941 | } |
2fc251a8 | 942 | hpsizes = get_paca()->mm_ctx_high_slices_psize; |
7aa0727f AK |
943 | index = GET_HIGH_SLICE_INDEX(addr); |
944 | mask_index = index & 0x1; | |
945 | return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF; | |
3a8247cc PM |
946 | } |
947 | ||
948 | #else | |
949 | unsigned int get_paca_psize(unsigned long addr) | |
950 | { | |
c33e54fa | 951 | return get_paca()->mm_ctx_user_psize; |
3a8247cc PM |
952 | } |
953 | #endif | |
954 | ||
721151d0 PM |
955 | /* |
956 | * Demote a segment to using 4k pages. | |
957 | * For now this makes the whole process use 4k pages. | |
958 | */ | |
721151d0 | 959 | #ifdef CONFIG_PPC_64K_PAGES |
fa28237c | 960 | void demote_segment_4k(struct mm_struct *mm, unsigned long addr) |
16f1c746 | 961 | { |
3a8247cc | 962 | if (get_slice_psize(mm, addr) == MMU_PAGE_4K) |
721151d0 | 963 | return; |
3a8247cc | 964 | slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); |
be3ebfe8 | 965 | copro_flush_all_slbs(mm); |
a1dca346 | 966 | if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { |
c395465d MN |
967 | |
968 | copy_mm_to_paca(&mm->context); | |
fa28237c PM |
969 | slb_flush_and_rebolt(); |
970 | } | |
721151d0 | 971 | } |
16f1c746 | 972 | #endif /* CONFIG_PPC_64K_PAGES */ |
721151d0 | 973 | |
fa28237c PM |
974 | #ifdef CONFIG_PPC_SUBPAGE_PROT |
975 | /* | |
976 | * This looks up a 2-bit protection code for a 4k subpage of a 64k page. | |
977 | * Userspace sets the subpage permissions using the subpage_prot system call. | |
978 | * | |
979 | * Result is 0: full permissions, _PAGE_WRITE: read-only,
73a1441a | 980 | * _PAGE_RWX: no access. |
fa28237c | 981 | */ |
d28513bc | 982 | static int subpage_protection(struct mm_struct *mm, unsigned long ea) |
fa28237c | 983 | { |
d28513bc | 984 | struct subpage_prot_table *spt = &mm->context.spt; |
fa28237c PM |
985 | u32 spp = 0; |
986 | u32 **sbpm, *sbpp; | |
987 | ||
988 | if (ea >= spt->maxaddr) | |
989 | return 0; | |
b0d436c7 | 990 | if (ea < 0x100000000UL) { |
fa28237c PM |
991 | /* addresses below 4GB use spt->low_prot */ |
992 | sbpm = spt->low_prot; | |
993 | } else { | |
994 | sbpm = spt->protptrs[ea >> SBP_L3_SHIFT]; | |
995 | if (!sbpm) | |
996 | return 0; | |
997 | } | |
998 | sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)]; | |
999 | if (!sbpp) | |
1000 | return 0; | |
1001 | spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)]; | |
1002 | ||
1003 | /* extract 2-bit bitfield for this 4k subpage */ | |
1004 | spp >>= 30 - 2 * ((ea >> 12) & 0xf); | |
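/*
 * A 64K page has sixteen 4K subpages and each 32-bit word packs sixteen
 * 2-bit fields, subpage 0 in bits 31:30; e.g. subpage index 3 shifts by
 * 30 - 6 = 24, bringing its field down to the low two bits tested below.
 */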
1005 | ||
73a1441a AK |
1006 | /* |
1007 | * 0 -> full permission
1008 | * 1 -> Read only
1009 | * 2 -> no access.
1010 | * We return the flags that need to be cleared.
1011 | */ | |
1012 | spp = ((spp & 2) ? _PAGE_RWX : 0) | ((spp & 1) ? _PAGE_WRITE : 0); | |
fa28237c PM |
1013 | return spp; |
1014 | } | |
1015 | ||
1016 | #else /* CONFIG_PPC_SUBPAGE_PROT */ | |
d28513bc | 1017 | static inline int subpage_protection(struct mm_struct *mm, unsigned long ea) |
fa28237c PM |
1018 | { |
1019 | return 0; | |
1020 | } | |
1021 | #endif | |
1022 | ||
4b8692c0 BH |
1023 | void hash_failure_debug(unsigned long ea, unsigned long access, |
1024 | unsigned long vsid, unsigned long trap, | |
d8139ebf | 1025 | int ssize, int psize, int lpsize, unsigned long pte) |
4b8692c0 BH |
1026 | { |
1027 | if (!printk_ratelimit()) | |
1028 | return; | |
1029 | pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n", | |
1030 | ea, access, current->comm); | |
d8139ebf AK |
1031 | pr_info(" trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n", |
1032 | trap, vsid, ssize, psize, lpsize, pte); | |
4b8692c0 BH |
1033 | } |
1034 | ||
09567e7f ME |
1035 | static void check_paca_psize(unsigned long ea, struct mm_struct *mm, |
1036 | int psize, bool user_region) | |
1037 | { | |
1038 | if (user_region) { | |
1039 | if (psize != get_paca_psize(ea)) { | |
c395465d | 1040 | copy_mm_to_paca(&mm->context); |
09567e7f ME |
1041 | slb_flush_and_rebolt(); |
1042 | } | |
1043 | } else if (get_paca()->vmalloc_sllp != | |
1044 | mmu_psize_defs[mmu_vmalloc_psize].sllp) { | |
1045 | get_paca()->vmalloc_sllp = | |
1046 | mmu_psize_defs[mmu_vmalloc_psize].sllp; | |
1047 | slb_vmalloc_update(); | |
1048 | } | |
1049 | } | |
1050 | ||
1da177e4 LT |
1051 | /* Result code is: |
1052 | * 0 - handled | |
1053 | * 1 - normal page fault | |
1054 | * -1 - critical hash insertion error | |
fa28237c | 1055 | * -2 - access not permitted by subpage protection mechanism |
1da177e4 | 1056 | */ |
aefa5688 AK |
1057 | int hash_page_mm(struct mm_struct *mm, unsigned long ea, |
1058 | unsigned long access, unsigned long trap, | |
1059 | unsigned long flags) | |
1da177e4 | 1060 | { |
891121e6 | 1061 | bool is_thp; |
ba12eede | 1062 | enum ctx_state prev_state = exception_enter(); |
a1128f8f | 1063 | pgd_t *pgdir; |
1da177e4 | 1064 | unsigned long vsid; |
1da177e4 | 1065 | pte_t *ptep; |
a4fe3ce7 | 1066 | unsigned hugeshift; |
56aa4129 | 1067 | const struct cpumask *tmp; |
aefa5688 | 1068 | int rc, user_region = 0; |
1189be65 | 1069 | int psize, ssize; |
1da177e4 | 1070 | |
3c726f8d BH |
1071 | DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", |
1072 | ea, access, trap); | |
cfcb3d80 | 1073 | trace_hash_fault(ea, access, trap); |
1f8d419e | 1074 | |
3c726f8d | 1075 | /* Get region & vsid */ |
1da177e4 LT |
1076 | switch (REGION_ID(ea)) { |
1077 | case USER_REGION_ID: | |
1078 | user_region = 1; | |
3c726f8d BH |
1079 | if (! mm) { |
1080 | DBG_LOW(" user region with no mm !\n"); | |
ba12eede LZ |
1081 | rc = 1; |
1082 | goto bail; | |
3c726f8d | 1083 | } |
16c2d476 | 1084 | psize = get_slice_psize(mm, ea); |
1189be65 PM |
1085 | ssize = user_segment_size(ea); |
1086 | vsid = get_vsid(mm->context.id, ea, ssize); | |
1da177e4 | 1087 | break; |
1da177e4 | 1088 | case VMALLOC_REGION_ID: |
1189be65 | 1089 | vsid = get_kernel_vsid(ea, mmu_kernel_ssize); |
bf72aeba PM |
1090 | if (ea < VMALLOC_END) |
1091 | psize = mmu_vmalloc_psize; | |
1092 | else | |
1093 | psize = mmu_io_psize; | |
1189be65 | 1094 | ssize = mmu_kernel_ssize; |
1da177e4 | 1095 | break; |
1da177e4 LT |
1096 | default: |
1097 | /* Not a valid range | |
1098 | * Send the problem up to do_page_fault | |
1099 | */ | |
ba12eede LZ |
1100 | rc = 1; |
1101 | goto bail; | |
1da177e4 | 1102 | } |
3c726f8d | 1103 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); |
1da177e4 | 1104 | |
c60ac569 AK |
1105 | /* Bad address. */ |
1106 | if (!vsid) { | |
1107 | DBG_LOW("Bad address!\n"); | |
ba12eede LZ |
1108 | rc = 1; |
1109 | goto bail; | |
c60ac569 | 1110 | } |
3c726f8d | 1111 | /* Get pgdir */ |
1da177e4 | 1112 | pgdir = mm->pgd; |
ba12eede LZ |
1113 | if (pgdir == NULL) { |
1114 | rc = 1; | |
1115 | goto bail; | |
1116 | } | |
1da177e4 | 1117 | |
3c726f8d | 1118 | /* Check CPU locality */ |
56aa4129 RR |
1119 | tmp = cpumask_of(smp_processor_id()); |
1120 | if (user_region && cpumask_equal(mm_cpumask(mm), tmp)) | |
aefa5688 | 1121 | flags |= HPTE_LOCAL_UPDATE; |
1da177e4 | 1122 | |
16c2d476 | 1123 | #ifndef CONFIG_PPC_64K_PAGES |
a4fe3ce7 DG |
1124 | /* If we use 4K pages and our psize is not 4K, then we might |
1125 | * be hitting a special driver mapping, and need to align the | |
1126 | * address before we fetch the PTE. | |
1127 | * | |
1128 | * It could also be a hugepage mapping, in which case this is | |
1129 | * not necessary, but it's not harmful, either. | |
16c2d476 BH |
1130 | */ |
1131 | if (psize != MMU_PAGE_4K) | |
1132 | ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1); | |
1133 | #endif /* CONFIG_PPC_64K_PAGES */ | |
1134 | ||
3c726f8d | 1135 | /* Get PTE and page size from page tables */ |
891121e6 | 1136 | ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift); |
3c726f8d BH |
1137 | if (ptep == NULL || !pte_present(*ptep)) { |
1138 | DBG_LOW(" no PTE !\n"); | |
ba12eede LZ |
1139 | rc = 1; |
1140 | goto bail; | |
3c726f8d BH |
1141 | } |
1142 | ||
ca91e6c0 BH |
1143 | /* Add _PAGE_PRESENT to the required access perm */ |
1144 | access |= _PAGE_PRESENT; | |
1145 | ||
1146 | /* Pre-check access permissions (will be re-checked atomically | |
1147 | * in __hash_page_XX but this pre-check is a fast path)
1148 | */ | |
ac29c640 | 1149 | if (!check_pte_access(access, pte_val(*ptep))) { |
ca91e6c0 | 1150 | DBG_LOW(" no access !\n"); |
ba12eede LZ |
1151 | rc = 1; |
1152 | goto bail; | |
ca91e6c0 BH |
1153 | } |
1154 | ||
ba12eede | 1155 | if (hugeshift) { |
891121e6 | 1156 | if (is_thp) |
6d492ecc | 1157 | rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, |
aefa5688 | 1158 | trap, flags, ssize, psize); |
6d492ecc AK |
1159 | #ifdef CONFIG_HUGETLB_PAGE |
1160 | else | |
1161 | rc = __hash_page_huge(ea, access, vsid, ptep, trap, | |
aefa5688 | 1162 | flags, ssize, hugeshift, psize); |
6d492ecc AK |
1163 | #else |
1164 | else { | |
1165 | /* | |
1166 | * if we have hugeshift and this is not a transparent hugepage,
1167 | * with hugetlb disabled, something is really wrong.
1168 | */ | |
1169 | rc = 1; | |
1170 | WARN_ON(1); | |
1171 | } | |
1172 | #endif | |
a1dca346 IM |
1173 | if (current->mm == mm) |
1174 | check_paca_psize(ea, mm, psize, user_region); | |
09567e7f | 1175 | |
ba12eede LZ |
1176 | goto bail; |
1177 | } | |
a4fe3ce7 | 1178 | |
3c726f8d BH |
1179 | #ifndef CONFIG_PPC_64K_PAGES |
1180 | DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); | |
1181 | #else | |
1182 | DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep), | |
1183 | pte_val(*(ptep + PTRS_PER_PTE))); | |
1184 | #endif | |
3c726f8d | 1185 | /* Do actual hashing */ |
16c2d476 | 1186 | #ifdef CONFIG_PPC_64K_PAGES |
945537df AK |
1187 | /* If H_PAGE_4K_PFN is set, make sure this is a 4k segment */ |
1188 | if ((pte_val(*ptep) & H_PAGE_4K_PFN) && psize == MMU_PAGE_64K) { | |
721151d0 PM |
1189 | demote_segment_4k(mm, ea); |
1190 | psize = MMU_PAGE_4K; | |
1191 | } | |
1192 | ||
16f1c746 BH |
1193 | /* If this PTE is non-cacheable and we have restrictions on |
1194 | * using non cacheable large pages, then we switch to 4k | |
1195 | */ | |
30bda41a | 1196 | if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) { |
16f1c746 BH |
1197 | if (user_region) { |
1198 | demote_segment_4k(mm, ea); | |
1199 | psize = MMU_PAGE_4K; | |
1200 | } else if (ea < VMALLOC_END) { | |
1201 | /* | |
1202 | * some driver did a non-cacheable mapping | |
1203 | * in vmalloc space, so switch vmalloc | |
1204 | * to 4k pages | |
1205 | */ | |
1206 | printk(KERN_ALERT "Reducing vmalloc segment " | |
1207 | "to 4kB pages because of " | |
1208 | "non-cacheable mapping\n"); | |
1209 | psize = mmu_vmalloc_psize = MMU_PAGE_4K; | |
be3ebfe8 | 1210 | copro_flush_all_slbs(mm); |
bf72aeba | 1211 | } |
16f1c746 | 1212 | } |
09567e7f | 1213 | |
0863d7f2 AK |
1214 | #endif /* CONFIG_PPC_64K_PAGES */ |
1215 | ||
a1dca346 IM |
1216 | if (current->mm == mm) |
1217 | check_paca_psize(ea, mm, psize, user_region); | |
16f1c746 | 1218 | |
73b341ef | 1219 | #ifdef CONFIG_PPC_64K_PAGES |
bf72aeba | 1220 | if (psize == MMU_PAGE_64K) |
aefa5688 AK |
1221 | rc = __hash_page_64K(ea, access, vsid, ptep, trap, |
1222 | flags, ssize); | |
3c726f8d | 1223 | else |
73b341ef | 1224 | #endif /* CONFIG_PPC_64K_PAGES */ |
fa28237c | 1225 | { |
a1128f8f | 1226 | int spp = subpage_protection(mm, ea); |
fa28237c PM |
1227 | if (access & spp) |
1228 | rc = -2; | |
1229 | else | |
1230 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, | |
aefa5688 | 1231 | flags, ssize, spp); |
fa28237c | 1232 | } |
3c726f8d | 1233 | |
4b8692c0 BH |
1234 | /* Dump some info in case of hash insertion failure; these should
1235 | * never happen, so it is really useful to know if/when they do
1236 | */ | |
1237 | if (rc == -1) | |
1238 | hash_failure_debug(ea, access, vsid, trap, ssize, psize, | |
d8139ebf | 1239 | psize, pte_val(*ptep)); |
3c726f8d BH |
1240 | #ifndef CONFIG_PPC_64K_PAGES |
1241 | DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); | |
1242 | #else | |
1243 | DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep), | |
1244 | pte_val(*(ptep + PTRS_PER_PTE))); | |
1245 | #endif | |
1246 | DBG_LOW(" -> rc=%d\n", rc); | |
ba12eede LZ |
1247 | |
1248 | bail: | |
1249 | exception_exit(prev_state); | |
3c726f8d | 1250 | return rc; |
1da177e4 | 1251 | } |
a1dca346 IM |
1252 | EXPORT_SYMBOL_GPL(hash_page_mm); |
1253 | ||
aefa5688 AK |
1254 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap, |
1255 | unsigned long dsisr) | |
a1dca346 | 1256 | { |
aefa5688 | 1257 | unsigned long flags = 0; |
a1dca346 IM |
1258 | struct mm_struct *mm = current->mm; |
1259 | ||
1260 | if (REGION_ID(ea) == VMALLOC_REGION_ID) | |
1261 | mm = &init_mm; | |
1262 | ||
aefa5688 AK |
1263 | if (dsisr & DSISR_NOHPTE) |
1264 | flags |= HPTE_NOHPTE_UPDATE; | |
1265 | ||
1266 | return hash_page_mm(mm, ea, access, trap, flags); | |
a1dca346 | 1267 | } |
67207b96 | 1268 | EXPORT_SYMBOL_GPL(hash_page); |
1da177e4 | 1269 | |
106713a1 AK |
1270 | int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap, |
1271 | unsigned long dsisr) | |
1272 | { | |
c7d54842 | 1273 | unsigned long access = _PAGE_PRESENT | _PAGE_READ; |
106713a1 AK |
1274 | unsigned long flags = 0; |
1275 | struct mm_struct *mm = current->mm; | |
1276 | ||
1277 | if (REGION_ID(ea) == VMALLOC_REGION_ID) | |
1278 | mm = &init_mm; | |
1279 | ||
1280 | if (dsisr & DSISR_NOHPTE) | |
1281 | flags |= HPTE_NOHPTE_UPDATE; | |
1282 | ||
1283 | if (dsisr & DSISR_ISSTORE) | |
c7d54842 | 1284 | access |= _PAGE_WRITE; |
106713a1 | 1285 | /* |
ac29c640 AK |
1286 | * We set _PAGE_PRIVILEGED only when |
1287 | * kernel mode access kernel space. | |
1288 | * | |
1289 | * _PAGE_PRIVILEGED is NOT set | |
1290 | * 1) when kernel mode access user space | |
1291 | * 2) user space access kernel space. | |
106713a1 | 1292 | */ |
ac29c640 | 1293 | access |= _PAGE_PRIVILEGED; |
106713a1 | 1294 | if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID)) |
ac29c640 | 1295 | access &= ~_PAGE_PRIVILEGED; |
106713a1 AK |
1296 | |
1297 | if (trap == 0x400) | |
1298 | access |= _PAGE_EXEC; | |
1299 | ||
1300 | return hash_page_mm(mm, ea, access, trap, flags); | |
1301 | } | |
1302 | ||
3c726f8d BH |
1303 | void hash_preload(struct mm_struct *mm, unsigned long ea, |
1304 | unsigned long access, unsigned long trap) | |
1da177e4 | 1305 | { |
12bc9f6f | 1306 | int hugepage_shift; |
3c726f8d | 1307 | unsigned long vsid; |
0b97fee0 | 1308 | pgd_t *pgdir; |
3c726f8d | 1309 | pte_t *ptep; |
3c726f8d | 1310 | unsigned long flags; |
aefa5688 | 1311 | int rc, ssize, update_flags = 0; |
3c726f8d | 1312 | |
d0f13e3c BH |
1313 | BUG_ON(REGION_ID(ea) != USER_REGION_ID); |
1314 | ||
1315 | #ifdef CONFIG_PPC_MM_SLICES | |
1316 | /* We only prefault standard pages for now */ | |
2b02d139 | 1317 | if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize)) |
3c726f8d | 1318 | return; |
d0f13e3c | 1319 | #endif |
3c726f8d BH |
1320 | |
1321 | DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," | |
1322 | " trap=%lx\n", mm, mm->pgd, ea, access, trap); | |
1da177e4 | 1323 | |
16f1c746 | 1324 | /* Get Linux PTE if available */ |
3c726f8d BH |
1325 | pgdir = mm->pgd; |
1326 | if (pgdir == NULL) | |
1327 | return; | |
0ac52dd7 AK |
1328 | |
1329 | /* Get VSID */ | |
1330 | ssize = user_segment_size(ea); | |
1331 | vsid = get_vsid(mm->context.id, ea, ssize); | |
1332 | if (!vsid) | |
1333 | return; | |
1334 | /* | |
1335 | * Hash doesn't like irqs. Walking linux page table with irq disabled | |
1336 | * saves us from holding multiple locks. | |
1337 | */ | |
1338 | local_irq_save(flags); | |
1339 | ||
12bc9f6f AK |
1340 | /* |
1341 | * THP pages use update_mmu_cache_pmd. We don't do | |
1342 | * hash preload there. Hence we can ignore THP here
1343 | */ | |
891121e6 | 1344 | ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift); |
3c726f8d | 1345 | if (!ptep) |
0ac52dd7 | 1346 | goto out_exit; |
16f1c746 | 1347 | |
12bc9f6f | 1348 | WARN_ON(hugepage_shift); |
16f1c746 | 1349 | #ifdef CONFIG_PPC_64K_PAGES |
945537df | 1350 | /* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on |
16f1c746 BH |
1351 | * a 64K kernel), then we don't preload, hash_page() will take |
1352 | * care of it once we actually try to access the page. | |
1353 | * That way we don't have to duplicate all of the logic for segment | |
1354 | * page size demotion here | |
1355 | */ | |
945537df | 1356 | if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep)) |
0ac52dd7 | 1357 | goto out_exit; |
16f1c746 BH |
1358 | #endif /* CONFIG_PPC_64K_PAGES */ |
1359 | ||
16c2d476 | 1360 | /* Is that local to this CPU ? */ |
56aa4129 | 1361 | if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) |
aefa5688 | 1362 | update_flags |= HPTE_LOCAL_UPDATE; |
16c2d476 BH |
1363 | |
1364 | /* Hash it in */ | |
73b341ef | 1365 | #ifdef CONFIG_PPC_64K_PAGES |
bf72aeba | 1366 | if (mm->context.user_psize == MMU_PAGE_64K) |
aefa5688 AK |
1367 | rc = __hash_page_64K(ea, access, vsid, ptep, trap, |
1368 | update_flags, ssize); | |
1da177e4 | 1369 | else |
73b341ef | 1370 | #endif /* CONFIG_PPC_64K_PAGES */ |
aefa5688 AK |
1371 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags, |
1372 | ssize, subpage_protection(mm, ea)); | |
4b8692c0 BH |
1373 | |
1374 | /* Dump some info in case of hash insertion failure; these should
1375 | * never happen, so it is really useful to know if/when they do
1376 | */ | |
1377 | if (rc == -1) | |
1378 | hash_failure_debug(ea, access, vsid, trap, ssize, | |
d8139ebf AK |
1379 | mm->context.user_psize, |
1380 | mm->context.user_psize, | |
1381 | pte_val(*ptep)); | |
0ac52dd7 | 1382 | out_exit: |
3c726f8d BH |
1383 | local_irq_restore(flags); |
1384 | } | |
1385 | ||
f6ab0b92 BH |
1386 | /* WARNING: This is called from hash_low_64.S, if you change this prototype, |
1387 | * do not forget to update the assembly call site ! | |
1388 | */ | |
5524a27d | 1389 | void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, |
aefa5688 | 1390 | unsigned long flags) |
3c726f8d BH |
1391 | { |
1392 | unsigned long hash, index, shift, hidx, slot; | |
aefa5688 | 1393 | int local = flags & HPTE_LOCAL_UPDATE; |
3c726f8d | 1394 | |
5524a27d AK |
1395 | DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); |
1396 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { | |
1397 | hash = hpt_hash(vpn, shift, ssize); | |
3c726f8d BH |
1398 | hidx = __rpte_to_hidx(pte, index); |
1399 | if (hidx & _PTEIDX_SECONDARY) | |
1400 | hash = ~hash; | |
1401 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1402 | slot += hidx & _PTEIDX_GROUP_IX; | |
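/*
 * The hidx stashed in the Linux PTE records both which bucket the HPTE
 * went into (primary or secondary hash, hence the ~hash above) and its
 * slot within the 8-entry group, so the entry can be invalidated
 * without searching the group.
 */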
5c339919 | 1403 | DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); |
db3d8534 AK |
1404 | /* |
1405 | * We use the same base page size and actual psize, because we don't
1406 | * use these functions for hugepages
1407 | */ | |
1408 | ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local); | |
3c726f8d | 1409 | } pte_iterate_hashed_end(); |
bc2a9408 MN |
1410 | |
1411 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | |
1412 | /* Transactions are not aborted by tlbiel, only tlbie. | |
1413 | * Without an abort, syncing a page back to a block device w/ PIO could pick up
1414 | * transactional data (bad!) so we force an abort here. Before the | |
1415 | * sync the page will be made read-only, which will flush_hash_page. | |
1416 | * BIG ISSUE here: if the kernel uses a page from userspace without | |
1417 | * unmapping it first, it may see the speculated version. | |
1418 | */ | |
1419 | if (local && cpu_has_feature(CPU_FTR_TM) && | |
c2fd22df | 1420 | current->thread.regs && |
bc2a9408 MN |
1421 | MSR_TM_ACTIVE(current->thread.regs->msr)) { |
1422 | tm_enable(); | |
1423 | tm_abort(TM_CAUSE_TLBI); | |
1424 | } | |
1425 | #endif | |
1da177e4 LT |
1426 | } |
1427 | ||
f1581bf1 AK |
1428 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1429 | void flush_hash_hugepage(unsigned long vsid, unsigned long addr, | |
aefa5688 AK |
1430 | pmd_t *pmdp, unsigned int psize, int ssize, |
1431 | unsigned long flags) | |
f1581bf1 AK |
1432 | { |
1433 | int i, max_hpte_count, valid; | |
1434 | unsigned long s_addr; | |
1435 | unsigned char *hpte_slot_array; | |
1436 | unsigned long hidx, shift, vpn, hash, slot; | |
aefa5688 | 1437 | int local = flags & HPTE_LOCAL_UPDATE; |
f1581bf1 AK |
1438 | |
1439 | s_addr = addr & HPAGE_PMD_MASK; | |
1440 | hpte_slot_array = get_hpte_slot_array(pmdp); | |
1441 | /* | |
1442 | * If we try to do a HUGE PTE update after a withdraw is done,
1443 | * we will find the below NULL. This happens when we do
1444 | * split_huge_page_pmd | |
1445 | */ | |
1446 | if (!hpte_slot_array) | |
1447 | return; | |
1448 | ||
d557b098 AK |
1449 | if (ppc_md.hugepage_invalidate) { |
1450 | ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array, | |
1451 | psize, ssize, local); | |
1452 | goto tm_abort; | |
1453 | } | |
f1581bf1 AK |
1454 | /* |
1455 | * No bulk hpte removal support; invalidate each entry individually.
1456 | */ | |
1457 | shift = mmu_psize_defs[psize].shift; | |
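	/* One slot-array byte per base-page-size subpage of the huge page. */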
1458 | max_hpte_count = HPAGE_PMD_SIZE >> shift; | |
1459 | for (i = 0; i < max_hpte_count; i++) { | |
1460 | /* | |
1461 | * 8 bits per hpte entry:
1462 | * 000 | [ secondary group (one bit) | hidx (3 bits) | valid bit ]
1463 | */ | |
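		/* For example, under the layout above a slot byte of 0x0b would
		 * decode as valid = 1, hidx = 5, secondary-group bit = 0
		 * (an illustrative value, not taken from the source). */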
1464 | valid = hpte_valid(hpte_slot_array, i); | |
1465 | if (!valid) | |
1466 | continue; | |
1467 | hidx = hpte_hash_index(hpte_slot_array, i); | |
1468 | ||
1469 | /* get the vpn */ | |
1470 | addr = s_addr + (i * (1ul << shift)); | |
1471 | vpn = hpt_vpn(addr, vsid, ssize); | |
1472 | hash = hpt_hash(vpn, shift, ssize); | |
1473 | if (hidx & _PTEIDX_SECONDARY) | |
1474 | hash = ~hash; | |
1475 | ||
1476 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1477 | slot += hidx & _PTEIDX_GROUP_IX; | |
1478 | ppc_md.hpte_invalidate(slot, vpn, psize, | |
d557b098 AK |
1479 | MMU_PAGE_16M, ssize, local); |
1480 | } | |
1481 | tm_abort: | |
1482 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | |
1483 | /* Transactions are not aborted by tlbiel, only tlbie. Without an
1484 | * abort, syncing a page back to a block device w/ PIO could pick up
1485 | * transactional data (bad!), so we force an abort here. Before the
1486 | * sync the page will be made read-only, which will flush_hash_page. | |
1487 | * BIG ISSUE here: if the kernel uses a page from userspace without | |
1488 | * unmapping it first, it may see the speculated version. | |
1489 | */ | |
1490 | if (local && cpu_has_feature(CPU_FTR_TM) && | |
1491 | current->thread.regs && | |
1492 | MSR_TM_ACTIVE(current->thread.regs->msr)) { | |
1493 | tm_enable(); | |
1494 | tm_abort(TM_CAUSE_TLBI); | |
f1581bf1 | 1495 | } |
d557b098 | 1496 | #endif |
2e826695 | 1497 | return; |
f1581bf1 AK |
1498 | } |
1499 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | |
1500 | ||
61b1a942 | 1501 | void flush_hash_range(unsigned long number, int local) |
1da177e4 | 1502 | { |
3c726f8d | 1503 | if (ppc_md.flush_hash_range) |
61b1a942 | 1504 | ppc_md.flush_hash_range(number, local); |
3c726f8d | 1505 | else { |
1da177e4 | 1506 | int i; |
61b1a942 | 1507 | struct ppc64_tlb_batch *batch = |
69111bac | 1508 | this_cpu_ptr(&ppc64_tlb_batch); |
1da177e4 LT |
1509 | |
1510 | for (i = 0; i < number; i++) | |
5524a27d | 1511 | flush_hash_page(batch->vpn[i], batch->pte[i], |
1189be65 | 1512 | batch->psize, batch->ssize, local); |
1da177e4 LT |
1513 | } |
1514 | } | |
1515 | ||
1da177e4 LT |
1516 | /* |
1517 | * low_hash_fault is called when the low-level hash code failed
1518 | * to insert a PTE due to a hypervisor error.
1519 | */ | |
fa28237c | 1520 | void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) |
1da177e4 | 1521 | { |
ba12eede LZ |
1522 | enum ctx_state prev_state = exception_enter(); |
1523 | ||
1da177e4 | 1524 | if (user_mode(regs)) { |
fa28237c PM |
1525 | #ifdef CONFIG_PPC_SUBPAGE_PROT |
1526 | if (rc == -2) | |
1527 | _exception(SIGSEGV, regs, SEGV_ACCERR, address); | |
1528 | else | |
1529 | #endif | |
1530 | _exception(SIGBUS, regs, BUS_ADRERR, address); | |
1531 | } else | |
1532 | bad_page_fault(regs, address, SIGBUS); | |
ba12eede LZ |
1533 | |
1534 | exception_exit(prev_state); | |
1da177e4 | 1535 | } |
370a908d | 1536 | |
b170bd3d LZ |
1537 | long hpte_insert_repeating(unsigned long hash, unsigned long vpn, |
1538 | unsigned long pa, unsigned long rflags, | |
1539 | unsigned long vflags, int psize, int ssize) | |
1540 | { | |
1541 | unsigned long hpte_group; | |
1542 | long slot; | |
1543 | ||
1544 | repeat: | |
1545 | hpte_group = ((hash & htab_hash_mask) * | |
1546 | HPTES_PER_GROUP) & ~0x7UL; | |
1547 | ||
1548 | /* Insert into the hash table, primary slot */ | |
1549 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags, | |
b1022fbd | 1550 | psize, psize, ssize); |
b170bd3d LZ |
1551 | |
1552 | /* Primary is full, try the secondary */ | |
1553 | if (unlikely(slot == -1)) { | |
1554 | hpte_group = ((~hash & htab_hash_mask) * | |
1555 | HPTES_PER_GROUP) & ~0x7UL; | |
1556 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, | |
1557 | vflags | HPTE_V_SECONDARY, | |
b1022fbd | 1558 | psize, psize, ssize); |
b170bd3d LZ |
1559 | if (slot == -1) { |
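			/* Both the primary and secondary groups are full: use the
			 * low bit of the timebase to pseudo-randomly pick one of
			 * them, evict an entry from it, and retry the insertion. */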
1560 | if (mftb() & 0x1) | |
1561 | hpte_group = ((hash & htab_hash_mask) * | |
1562 | HPTES_PER_GROUP) & ~0x7UL;
1563 | ||
1564 | ppc_md.hpte_remove(hpte_group); | |
1565 | goto repeat; | |
1566 | } | |
1567 | } | |
1568 | ||
1569 | return slot; | |
1570 | } | |
1571 | ||
370a908d BH |
1572 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1573 | static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) | |
1574 | { | |
016af59f | 1575 | unsigned long hash; |
1189be65 | 1576 | unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); |
5524a27d | 1577 | unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); |
09f3f326 | 1578 | unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL)); |
016af59f | 1579 | long ret; |
370a908d | 1580 | |
5524a27d | 1581 | hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); |
370a908d | 1582 | |
c60ac569 AK |
1583 | /* Don't create an HPTE entry for a bad address */
1584 | if (!vsid) | |
1585 | return; | |
016af59f LZ |
1586 | |
1587 | ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, | |
1588 | HPTE_V_BOLTED, | |
1589 | mmu_linear_psize, mmu_kernel_ssize); | |
1590 | ||
370a908d BH |
1591 | BUG_ON(ret < 0);
1592 | spin_lock(&linear_map_hash_lock); | |
1593 | BUG_ON(linear_map_hash_slots[lmi] & 0x80); | |
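	/* The high bit (0x80) marks this linear-map page as currently mapped;
	 * the low bits record the returned hidx (slot within the group plus
	 * the secondary-group flag) for later invalidation. */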
1594 | linear_map_hash_slots[lmi] = ret | 0x80; | |
1595 | spin_unlock(&linear_map_hash_lock); | |
1596 | } | |
1597 | ||
1598 | static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) | |
1599 | { | |
1189be65 PM |
1600 | unsigned long hash, hidx, slot; |
1601 | unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); | |
5524a27d | 1602 | unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); |
370a908d | 1603 | |
5524a27d | 1604 | hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); |
370a908d BH |
1605 | spin_lock(&linear_map_hash_lock); |
1606 | BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); | |
1607 | hidx = linear_map_hash_slots[lmi] & 0x7f; | |
1608 | linear_map_hash_slots[lmi] = 0; | |
1609 | spin_unlock(&linear_map_hash_lock); | |
1610 | if (hidx & _PTEIDX_SECONDARY) | |
1611 | hash = ~hash; | |
1612 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1613 | slot += hidx & _PTEIDX_GROUP_IX; | |
db3d8534 AK |
1614 | ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize, |
1615 | mmu_kernel_ssize, 0); | |
370a908d BH |
1616 | } |
1617 | ||
031bc574 | 1618 | void __kernel_map_pages(struct page *page, int numpages, int enable) |
370a908d BH |
1619 | { |
1620 | unsigned long flags, vaddr, lmi; | |
1621 | int i; | |
1622 | ||
1623 | local_irq_save(flags); | |
1624 | for (i = 0; i < numpages; i++, page++) { | |
1625 | vaddr = (unsigned long)page_address(page); | |
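		/* The linear-map index is simply the physical page frame number. */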
1626 | lmi = __pa(vaddr) >> PAGE_SHIFT; | |
1627 | if (lmi >= linear_map_hash_count) | |
1628 | continue; | |
1629 | if (enable) | |
1630 | kernel_map_linear_page(vaddr, lmi); | |
1631 | else | |
1632 | kernel_unmap_linear_page(vaddr, lmi); | |
1633 | } | |
1634 | local_irq_restore(flags); | |
1635 | } | |
1636 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | |
cd3db0c4 BH |
1637 | |
1638 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
1639 | phys_addr_t first_memblock_size) | |
1640 | { | |
1641 | /* We don't currently support the first MEMBLOCK not mapping
1642 | * physical address 0 on these processors.
1643 | */ | |
1644 | BUG_ON(first_memblock_base != 0); | |
1645 | ||
1646 | /* On LPAR systems, the first entry is our RMA region. Non-LPAR
1647 | * 64-bit hash MMU systems don't have a limitation on real mode
1648 | * access, but using the first entry works well enough. We also
1649 | * clamp it to 1G to avoid some funky things such as RTAS
1650 | * bugs, etc.
1651 | */ | |
1652 | ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); | |
1653 | ||
1654 | /* Finally limit subsequent allocations */ | |
1655 | memblock_set_current_limit(ppc64_rma_size); | |
1656 | } |