/*
 * From commit "powerpc/mm: cleanup ifdef mess in add_huge_page_size()"
 * File: arch/powerpc/include/asm/book3s/64/hugetlb.h
 */
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
bee8b3b5
AK
2#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
3#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
48483760
AK
4/*
5 * For radix we want generic code to handle hugetlb. But then if we want
6 * both hash and radix to be enabled together we need to workaround the
7 * limitations.
8 */
9void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
10void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
11extern unsigned long
12radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
13 unsigned long len, unsigned long pgoff,
14 unsigned long flags);
fbfa26d8 15
8ef5cbde
AK
16extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
17 unsigned long addr, pte_t *ptep,
18 pte_t old_pte, pte_t pte);
19
fbfa26d8
AK
20static inline int hstate_get_psize(struct hstate *hstate)
21{
22 unsigned long shift;
23
24 shift = huge_page_shift(hstate);
25 if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
26 return MMU_PAGE_2M;
27 else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
28 return MMU_PAGE_1G;
ccf17c8b
AK
29 else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
30 return MMU_PAGE_16M;
31 else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
32 return MMU_PAGE_16G;
fbfa26d8
AK
33 else {
34 WARN(1, "Wrong huge page shift\n");
35 return mmu_virtual_psize;
36 }
37}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
	/*
	 * Some platforms reserve gigantic pages with hypervisor assist,
	 * which rules out runtime allocation there.  That is the case on
	 * hash-translation LPARs; everything else is fine.
	 */
	return radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR);
}
#endif

53
f1981b5b
AK
54/* hugepd entry valid bit */
55#define HUGEPD_VAL_BITS (0x8000000000000000UL)
56
8ef5cbde
AK
57#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
58extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
59 unsigned long addr, pte_t *ptep);
60
61#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
62extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
63 unsigned long addr, pte_t *ptep,
64 pte_t old_pte, pte_t new_pte);
8197af22
CL
65/*
66 * This should work for other subarchs too. But right now we use the
67 * new format only for 64bit book3s
68 */
69static inline pte_t *hugepd_page(hugepd_t hpd)
70{
71 BUG_ON(!hugepd_ok(hpd));
72 /*
73 * We have only four bits to encode, MMU page size
74 */
75 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
76 return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
77}
78
79static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
80{
81 return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
82}
83
84static inline unsigned int hugepd_shift(hugepd_t hpd)
85{
86 return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
87}
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	/* Nothing to do unless radix translation is active. */
	if (!radix_enabled())
		return;
	radix__flush_hugetlb_page(vma, vmaddr);
}
94
95static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
96 unsigned int pdshift)
97{
98 unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
99
100 return hugepd_page(hpd) + idx;
101}
102
5fb84fec
CL
103static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
104{
105 *hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS | (shift_to_mmu_psize(pshift) << 2));
106}
107

/*
 * NOTE(review): flush_hugetlb_page() is already defined static inline
 * above, so this prototype re-declares it (it keeps internal linkage
 * per C11 6.2.2p4 and is accepted by the compiler) and looks redundant
 * — confirm against the version history before removing it.
 */
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

723f268f
CL
110static inline int check_and_get_huge_psize(int shift)
111{
112 int mmu_psize;
113
114 if (shift > SLICE_HIGH_SHIFT)
115 return -EINVAL;
116
117 mmu_psize = shift_to_mmu_psize(shift);
118
119 /*
120 * We need to make sure that for different page sizes reported by
121 * firmware we only add hugetlb support for page sizes that can be
122 * supported by linux page table layout.
123 * For now we have
124 * Radix: 2M and 1G
125 * Hash: 16M and 16G
126 */
127 if (radix_enabled()) {
128 if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
129 return -EINVAL;
130 } else {
131 if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
132 return -EINVAL;
133 }
134 return mmu_psize;
135}
136
48483760 137#endif