/*
 * arch/powerpc/include/asm/hugetlb.h
 * (as of commit: "powerpc/mm/hash64: Map all the kernel regions in the same 0xc range")
 */
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
6d779079
GS
2#ifndef _ASM_POWERPC_HUGETLB_H
3#define _ASM_POWERPC_HUGETLB_H
4
41151e77 5#ifdef CONFIG_HUGETLB_PAGE
6d779079
GS
6#include <asm/page.h>
7
cf9427b8 8#ifdef CONFIG_PPC_BOOK3S_64
48483760 9
bee8b3b5 10#include <asm/book3s/64/hugetlb.h>
cf9427b8
AK
11/*
12 * This should work for other subarchs too. But right now we use the
13 * new format only for 64bit book3s
14 */
15static inline pte_t *hugepd_page(hugepd_t hpd)
16{
17 BUG_ON(!hugepd_ok(hpd));
18 /*
19 * We have only four bits to encode, MMU page size
20 */
21 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
20717e1f 22 return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
cf9427b8
AK
23}
24
25static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
26{
20717e1f 27 return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
cf9427b8
AK
28}
29
30static inline unsigned int hugepd_shift(hugepd_t hpd)
31{
32 return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
33}
48483760
AK
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	/* Only the radix MMU performs an explicit hugetlb flush here. */
	if (!radix_enabled())
		return;
	radix__flush_hugetlb_page(vma, vmaddr);
}
cf9427b8
AK
40
41#else
42
41151e77
BB
43static inline pte_t *hugepd_page(hugepd_t hpd)
44{
45 BUG_ON(!hugepd_ok(hpd));
4b914286 46#ifdef CONFIG_PPC_8xx
de0f9387 47 return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
4b914286 48#else
20717e1f
AK
49 return (pte_t *)((hpd_val(hpd) &
50 ~HUGEPD_SHIFT_MASK) | PD_HUGE);
4b914286 51#endif
41151e77
BB
52}
53
54static inline unsigned int hugepd_shift(hugepd_t hpd)
55{
4b914286 56#ifdef CONFIG_PPC_8xx
20717e1f 57 return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
4b914286 58#else
20717e1f 59 return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
4b914286 60#endif
41151e77
BB
61}
62
cf9427b8
AK
63#endif /* CONFIG_PPC_BOOK3S_64 */
64
65
b30e7590 66static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
41151e77
BB
67 unsigned pdshift)
68{
69 /*
881fde1d
BB
70 * On FSL BookE, we have multiple higher-level table entries that
71 * point to the same hugepte. Just use the first one since they're all
41151e77
BB
72 * identical. So for that case, idx=0.
73 */
74 unsigned long idx = 0;
75
b30e7590 76 pte_t *dir = hugepd_page(hpd);
3fb69c6a
CL
77#ifdef CONFIG_PPC_8xx
78 idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
79#elif !defined(CONFIG_PPC_FSL_BOOK3E)
b30e7590 80 idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
41151e77
BB
81#endif
82
83 return dir + idx;
84}
85
0895ecda
DG
86void flush_dcache_icache_hugepage(struct page *page);
87
014a32b3 88int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
6d779079 89 unsigned long len);
014a32b3 90
41151e77
BB
91static inline int is_hugepage_only_range(struct mm_struct *mm,
92 unsigned long addr,
93 unsigned long len)
94{
014a32b3
NP
95 if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
96 return slice_is_hugepage_only_range(mm, addr, len);
41151e77
BB
97 return 0;
98}
41151e77 99
d93e4d7d
BB
100void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
101 pte_t pte);
4b914286
CL
102#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	/* On 8xx a huge-page flush is just an ordinary TLB page flush. */
	flush_tlb_page(vma, vmaddr);
}
108#else
41151e77 109void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
4b914286 110#endif
6d779079 111
1e5f50fc 112#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
42b77728 113void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
6d779079
GS
114 unsigned long end, unsigned long floor,
115 unsigned long ceiling);
116
a4d83853 117#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
0895ecda
DG
118static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
119 unsigned long addr, pte_t *ptep)
120{
41151e77 121#ifdef CONFIG_PPC64
88247e8d 122 return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
41151e77
BB
123#else
124 return __pte(pte_update(ptep, ~0UL, 0));
125#endif
0895ecda
DG
126}
127
fe632225 128#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
8fe627ec
GS
129static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
130 unsigned long addr, pte_t *ptep)
131{
584dbc77 132 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
13dce033 133 flush_hugetlb_page(vma, addr);
8fe627ec
GS
134}
135
facf6d5b 136#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
bce85a16
BL
137int huge_ptep_set_access_flags(struct vm_area_struct *vma,
138 unsigned long addr, pte_t *ptep,
139 pte_t pte, int dirty);
7f2e9525 140
5d3a551c
WD
static inline void arch_clear_hugepage_flags(struct page *page)
{
	/* powerpc keeps no per-hugepage arch state, so nothing to clear. */
}
144
1e5f50fc
AG
145#include <asm-generic/hugetlb.h>
146
41151e77 147#else /* ! CONFIG_HUGETLB_PAGE */
41151e77
BB
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	/* !CONFIG_HUGETLB_PAGE stub: nothing to flush. */
}
a6146888 152
29409997 153#define hugepd_shift(x) 0
b30e7590 154static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
29409997
AK
155 unsigned pdshift)
156{
d8731527 157 return NULL;
29409997
AK
158}
159#endif /* CONFIG_HUGETLB_PAGE */
a6146888 160
6d779079 161#endif /* _ASM_POWERPC_HUGETLB_H */