powerpc/mm/hash64: Map all the kernel regions in the same 0xc range
[linux-2.6-block.git] arch/powerpc/include/asm/pte-walk.h
#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

/* Don't use this directly */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

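/*
 * Lockless lookup of the pte mapping 'ea' in an arbitrary mm. The caller
 * must have interrupts disabled (hence the VM_WARN below): on powerpc,
 * IRQ-off is what holds off a concurrent THP split/collapse and
 * page-table teardown while the returned pte is examined.
 */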
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	return __find_linux_pte(pgdir, ea, is_thp, hshift);
}

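/*
 * Lookup in the kernel (init_mm) page tables. Kernel mappings are never
 * transparent hugepages, so no is_thp out-parameter is needed; hshift
 * still reports the page-size shift when the mapping is larger than
 * PAGE_SIZE.
 */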
static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
{
	pgd_t *pgdir = init_mm.pgd;
	return __find_linux_pte(pgdir, ea, NULL, hshift);
}

/*
 * This is what we should always use. Any other lockless page table lookup needs
 * careful audit against THP split.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lock less page table lookup called on wrong mm\n", __func__);
	return __find_linux_pte(pgdir, ea, is_thp, hshift);
}

#endif /* _ASM_POWERPC_PTE_WALK_H */
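
For context, a minimal usage sketch, not part of this header: a hypothetical
helper that snapshots the pte backing an address in the current mm. The name
read_current_pte and its error handling are illustrative assumptions; the
point of the sketch is the IRQ save/restore window around both the lookup and
the dereference of the returned pointer, which is exactly what the VM_WARN
checks above are enforcing.

static int read_current_pte(unsigned long ea, pte_t *out)
{
	unsigned long flags;
	unsigned int hshift = 0;
	bool is_thp = false;
	pte_t *ptep;
	int ret = -EFAULT;

	/*
	 * IRQs must stay disabled for as long as the returned pte
	 * pointer is used, not just for the lookup itself.
	 */
	local_irq_save(flags);
	ptep = find_current_mm_pte(current->mm->pgd, ea, &is_thp, &hshift);
	if (ptep) {
		/* Snapshot: the page tables may change once IRQs are re-enabled. */
		*out = READ_ONCE(*ptep);
		ret = 0;
	}
	local_irq_restore(flags);

	return ret;
}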