#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#endif
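
/*
 * Why the extra bit under THP (an inference from get_hpte_slot_array()
 * below, not from the original comment): the pmd page is allocated at
 * twice its usual size so its second half can hold the pointer to the
 * stashed pgtable_t carrying the per-subpage hpte slot details.
 */
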
/*
 * Define the address range of the kernel non-linear virtual area
 */
#define KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)
/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
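
/*
 * Worked layout, a sketch derived purely from the constants above
 * (KERN_VIRT_SIZE is 16TB, so each half is 8TB):
 *
 *	VMALLOC_START	0xD000000000000000
 *	VMALLOC_END	0xD000080000000000	(first half)
 *	ISA_IO_BASE	0xD000080000000000	(== KERN_IO_START)
 *	ISA_IO_END	0xD000080000010000	(64K PIO window)
 *	PHB_IO_END	0xD000080080000000	(ISA_IO_BASE + 2G)
 *	IOREMAP_END	0xD000100000000000	(== KERN_VIRT_START + KERN_VIRT_SIZE)
 */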

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
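
/*
 * Example (derived from the definitions above): REGION_ID() simply
 * extracts the top nibble of an effective address, so
 *
 *	REGION_ID(0xD000080000000000) == 0xD	(VMALLOC_REGION_ID)
 *	REGION_ID(0xC000000000001000) == 0xC	(KERNEL_REGION_ID,
 *						 PAGE_OFFSET being 0xC000...)
 *	REGION_ID(0x0000000010000000) == 0x0	(USER_REGION_ID)
 */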

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#define vmemmap			((struct page *)VMEMMAP_BASE)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/*
 * THP pages can't be special, so we can reuse the _PAGE_SPECIAL bit.
 */
#define _PAGE_SPLITTING _PAGE_SPECIAL

/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since a THP huge page also needs to track the real subpage
 * details.
 */
#define _PAGE_THP_HUGE  _PAGE_4K_PFN

/*
 * Set of bits not changed in pmd_modify.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |		\
			 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
			 _PAGE_THP_HUGE)
/*
 * Default defines for things which we don't use.
 * We should get this removed.
 */
#include <asm/pte-common.h>
#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
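
/*
 * Minimal usage sketch (an illustration, not code from this header):
 * the hash flush path walks each hashed subpage of a PTE like this,
 * with the loop-body braces supplied by the caller:
 *
 *	unsigned long index, shift;
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		unsigned long hidx = __rpte_to_hidx(rpte, index);
 *		// ... invalidate the HPTE for this subpage ...
 *	} pte_iterate_hashed_end();
 *
 * In this default (non-64K) implementation the body runs exactly once.
 */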

/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(!pmd_none(pmd))
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)

extern struct page *pud_page(pud_t pud);

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
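
/*
 * Putting the walk together, a sketch under the assumption that
 * pud_offset() is provided by the page-table geometry headers (it is
 * not defined in this file):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);	// kernel virtual address
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with a pmd_none()/pmd_bad() style check at each level before
 * descending.
 */
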
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set,
				       int huge)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
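
/*
 * Rough C equivalent of the ldarx/stdcx. sequence above (illustrative
 * only, the real code must use the atomic reservation pair):
 *
 *	do {
 *		old = *ptep;		// ldarx: load with reservation,
 *	} while (old & _PAGE_BUSY);	//	  retry while busy
 *	new = (old & ~clr) | set;
 *	// stdcx.: the store succeeds only if the reservation still
 *	// holds; otherwise branch back to the ldarx and retry
 */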

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE, this
 * function doesn't need to flush the hash entry
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	} while (0)
/*
 * on pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT;
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		__pte((x).val)
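
/*
 * Worked example (kept symbolic, since _PAGE_BIT_SWAP_TYPE and
 * PTE_RPN_SHIFT depend on the page-size configuration): for type t
 * and offset o,
 *
 *	__swp_entry(t, o).val = (t << _PAGE_BIT_SWAP_TYPE)
 *			      | (o << PTE_RPN_SHIFT)
 *
 * and __swp_type()/__swp_offset() mask and shift the same fields back
 * out, with at most SWP_TYPE_BITS (5) bits of type.
 */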

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

/*
 * The linux hugepage PMD now includes the pmd entries followed by the address
 * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
 * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte for
 * each HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries,
 * and with 4K HPTEs we need 4096 entries. Both will fit in a 4K pgtable_t.
 *
 * The last three bits are intentionally left as zero. These memory locations
 * are also used as normal page PTE pointers, so if we have any pointers
 * left around while we collapse a hugepage, we need to make sure the
 * _PAGE_PRESENT bit of those is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return (hpte_slot_array[index] >> 3) & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 4;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
}
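
/*
 * Worked example of the slot-array encoding (follows directly from the
 * helpers above): mark_hpte_slot_valid(arr, i, 5) stores
 * (5 << 4) | (1 << 3) = 0x58 in arr[i]; hpte_valid(arr, i) then yields
 * (0x58 >> 3) & 1 = 1 and hpte_hash_index(arr, i) yields 0x58 >> 4 = 5.
 */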

struct page *realmode_pfn_to_page(unsigned long pfn);

static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hidx is stored in the pgtable whose address is in the
	 * second half of the PMD
	 *
	 * Order this load with the test for pmd_trans_huge in the caller
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
/*
 * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs
 * page. The hugetlbfs page table walking and mangling paths are totally
 * separated from the core VM paths and they're differentiated by
 * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in such a case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP, because
 * for THP we also track the subpage details at the pmd level. We don't do
 * that for explicit huge pages.
 */
static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pmd_val(pmd) & _PAGE_SPLITTING;
	return 0;
}

extern int has_transparent_hugepage(void);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int pmd_large(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pmd_val(pmd) & 0x3) != 0x0);
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/* Do nothing, mk_pmd() does this part.  */
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_PRESENT;
	return pmd;
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;
	return pmd;
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
					 unsigned long addr,
					 pmd_t *pmdp,
					 unsigned long clr,
					 unsigned long set);

static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{

	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
		return;

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use pgtable to store per pmd
	 * specific information. So when we switch the pmd,
	 * we should also withdraw and deposit the pgtable
	 */
	return true;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */