#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_

/*
 * Common bits between the hash and radix page tables
 */
#define _PAGE_BIT_SWAP_TYPE	0

#define _PAGE_EXEC		0x00001 /* execute permission */
#define _PAGE_WRITE		0x00002 /* write access allowed */
#define _PAGE_READ		0x00004 /* read access allowed */
#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
#define _PAGE_SAO		0x00010 /* strong access order */
#define _PAGE_NON_IDEMPOTENT	0x00020 /* non-idempotent memory */
#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
#define _PAGE_DIRTY		0x00080 /* C: page changed */
#define _PAGE_ACCESSED		0x00100 /* R: page referenced */
/*
 * Software bits
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	0x00200 /* software: software dirty tracking */
#else
#define _PAGE_SOFT_DIRTY	0x00000
#endif
#define _PAGE_SPECIAL		0x00400 /* software: special page */

#define _PAGE_PTE		(1ul << 62) /* distinguishes PTEs from pointers */
#define _PAGE_PRESENT		(1ul << 63) /* pte contains a translation */
/*
 * Drivers request cache-inhibited pte mappings using _PAGE_NO_CACHE.
 * Instead of fixing all of them, provide an alternate define that
 * maps to the CI (cache-inhibited) pte encoding.
 */
#define _PAGE_NO_CACHE		_PAGE_TOLERANT
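/*
 * _PAGE_SAO, _PAGE_NON_IDEMPOTENT and _PAGE_TOLERANT share one 2-bit
 * cache-control field (mask 0x30):
 *
 *	0x00  normal cacheable memory
 *	0x10  strong access ordering (_PAGE_SAO)
 *	0x20  non-idempotent I/O (_PAGE_NON_IDEMPOTENT)
 *	0x30  tolerant I/O (_PAGE_TOLERANT)
 *
 * Both I/O encodings are cache inhibited, which is why _PAGE_NO_CACHE
 * can simply alias _PAGE_TOLERANT; see _PAGE_CACHE_CTL and pte_ci()
 * below.
 */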
/*
 * We support a 57-bit real address in the pte. Clear everything above
 * bit 57 and everything below PAGE_SHIFT.
 */
#define PTE_RPN_MASK	(((1UL << 57) - 1) & (PAGE_MASK))
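/*
 * For example, with 64K pages (PAGE_SHIFT = 16, illustrative) this
 * evaluates to 0x01ffffffffff0000, i.e. bits 16..56, leaving the top
 * bits of the pte free for _PAGE_PTE, _PAGE_PRESENT and the software
 * bits above.
 */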
/*
 * Set of bits not changed in pmd_modify. Even though we have
 * hash-specific bits in here, on radix we expect them to be zero.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY |	\
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE |	\
			 _PAGE_SOFT_DIRTY)
/*
 * user access blocked by key
 */
#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_ROX	(_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY |	\
				 _PAGE_RW | _PAGE_EXEC)
/*
 * No page size encoding in the linux PTE
 */
#define _PAGE_PSIZE		0
/*
 * _PAGE_CHG_MASK is the mask of bits that are preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY |	\
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |	\
			 _PAGE_SOFT_DIRTY)
/*
 * Mask of bits returned by pte_pgprot()
 */
#define PAGE_PROT_BITS	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
			 _PAGE_SOFT_DIRTY)
/*
 * We define 2 sets of base prot bits: one for basic pages (i.e.,
 * cacheable kernel and user pages) and one for non-cacheable pages. We
 * always set _PAGE_COHERENT when SMP is enabled or the processor might
 * need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
#define _PAGE_BASE	(_PAGE_BASE_NC)

/* Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now (we could make
 * write-only pages on BookE but we don't bother for now). Execute
 * permission control is possible on platforms that define _PAGE_EXEC.
 *
 * Note: due to the way vm flags are laid out, the bits are XWR.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X

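/*
 * The three digits index the vm flag bits in XWR order; e.g. __P011 is
 * a private read+write mapping, which must fault for copy-on-write and
 * therefore maps to the read-only PAGE_COPY, while the shared
 * equivalent __S011 maps straight to PAGE_SHARED.
 */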
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW |	\
				 _PAGE_TOLERANT)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW |	\
				 _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
#define PAGE_AGP		(PAGE_KERNEL_NC)

#ifndef __ASSEMBLY__
/*
 * page table defines
 */
extern unsigned long __pte_index_size;
extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
extern unsigned long __pmd_cache_index;
#define PTE_INDEX_SIZE  __pte_index_size
#define PMD_INDEX_SIZE  __pmd_index_size
#define PUD_INDEX_SIZE  __pud_index_size
#define PGD_INDEX_SIZE  __pgd_index_size
#define PMD_CACHE_INDEX __pmd_cache_index
/*
 * Because of the use of pte fragments and THP, the page table sizes are
 * not always derived from the index sizes above.
 */
extern unsigned long __pte_table_size;
extern unsigned long __pmd_table_size;
extern unsigned long __pud_table_size;
extern unsigned long __pgd_table_size;
#define PTE_TABLE_SIZE	__pte_table_size
#define PMD_TABLE_SIZE	__pmd_table_size
#define PUD_TABLE_SIZE	__pud_table_size
#define PGD_TABLE_SIZE	__pgd_table_size
/*
 * Pgtable size used by swapper, initialized in asm code.
 * We will switch this later to the radix PGD.
 */
#define MAX_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
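/*
 * Illustrative arithmetic (the index sizes here are runtime variables;
 * the values below are only an example): with PAGE_SHIFT = 16 and
 * PTE_INDEX_SIZE = 8, PMD_SHIFT = 24, so one pmd entry maps 16M; a
 * PMD_INDEX_SIZE of 5 would then give PUD_SHIFT = 29, i.e. 512M per
 * pud entry.
 */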

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0xc0000000000000ffUL
#endif /* __ASSEMBLY__ */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space:
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
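/*
 * In concrete terms: a 64K ISA window at ISA_IO_BASE, PHB IO space
 * filling the remainder of the first FULL_IO_SIZE (2G) bytes, and
 * everything from there up to IOREMAP_END available to ioremap().
 */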

#define vmemmap			((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors. It's
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages.
 */
#ifndef __real_pte

#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
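/*
 * The two iterator macros must be used as a bracketing pair, since the
 * begin macro opens a do { that only pte_iterate_hashed_end()'s
 * } while(0) closes. A sketch of a hypothetical caller:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		// operate on subpage "index" of size 1UL << shift
 *	} pte_iterate_hashed_end();
 *
 * In this default (non-64K) version the body runs exactly once.
 */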

/*
 * For hash, even if we have _PAGE_ACCESSED = 0, we do a pte_update:
 * we currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this, but for the moment we
 * override these functions and force a tlb flush unconditionally.
 * For radix, H_PAGE_HASHPTE should be zero, hence we can use the same
 * function for both hash and radix.
 */
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})
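/*
 * The generic interface takes a vma, so this wrapper (a GCC statement
 * expression) merely extracts vma->vm_mm and defers to
 * __ptep_test_and_clear_young() above.
 */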

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_WRITE); }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h. On powerpc, this will only
 * work for user pages and will always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
		(_PAGE_PRESENT | _PAGE_PRIVILEGED);
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_PRESENT);
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
}
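/*
 * For any pfn whose physical address fits below bit 57 the pair
 * round-trips: pte_pfn(pfn_pte(pfn, prot)) == pfn, since PTE_RPN_MASK
 * isolates exactly the bits the shift deposited.
 */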

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* FIXME!! check whether this needs to be a conditional */
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define _PAGE_CACHE_CTL	(_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NON_IDEMPOTENT);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_TOLERANT);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
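/*
 * Sketch of a hypothetical driver use: after
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * the cache-control field holds _PAGE_TOLERANT, so pte_ci() below will
 * report the resulting ptes as cache inhibited.
 */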
/*
 * Check whether a pte mapping has the cache-inhibited property.
 */
static inline bool pte_ci(pte_t pte)
{
	unsigned long pte_v = pte_val(pte);

	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
		return true;
	return false;
}

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	*pgdp = __pgd(0);
}

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
	return __pgd(pte_val(pte));
}

extern struct page *pgd_page(pgd_t pgd);

/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS.	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
	} while (0)
/*
 * On the pte side we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << _PAGE_BIT_SWAP_TYPE) \
				| (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t
 * from the swap type and offset we get from swap, and convert that to a
 * pte to find a matching pte in the linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
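/*
 * Worked example: __swp_entry(3, 0x42) yields
 * (3 << _PAGE_BIT_SWAP_TYPE) | ((0x42 << PAGE_SHIFT) & PTE_RPN_MASK).
 * The type sits below PAGE_SHIFT and the offset inside PTE_RPN_MASK, so
 * __swp_type() and __swp_offset() recover 3 and 0x42 exactly, and
 * __swp_entry_to_pte()/__pte_to_swp_entry() only toggle _PAGE_PTE.
 */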

static inline bool pte_user(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_PRIVILEGED);
}

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This checks the _PAGE_RWX and _PAGE_PRESENT bits.
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This checks for access to privileged space.
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
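/*
 * For example, a user read fault would pass access = _PAGE_READ: a pte
 * lacking _PAGE_READ fails the first test (any requested bit absent
 * from ptev rejects), and a kernel-only pte with _PAGE_PRIVILEGED set
 * fails the second.
 */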

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}
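/*
 * Setting _PAGE_PTE (bit 62) here is what lets pmd walkers tell a huge
 * leaf entry apart from a pointer to a pte page; compare
 * PMD_MASKED_BITS above, which strips those top bits when following
 * the pointer case.
 */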

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
				    unsigned long address, pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use the pgtable to store per-pmd specific
	 * information. So when we switch the pmd, we should also withdraw
	 * and deposit the pgtable.
	 */
	return true;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */