powerpc: introduce pte_set_hidx() helper
[linux-2.6-block.git] arch/powerpc/include/asm/book3s/64/hash-64k.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE  8
#define H_PMD_INDEX_SIZE  10
#define H_PUD_INDEX_SIZE  7
#define H_PGD_INDEX_SIZE  8

/*
 * A 64K aligned address frees up a few of the lower bits of the RPN
 * for us. We steal those here. For more details look at pte_pfn/pfn_pte().
 */
#define H_PAGE_COMBO	_RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN	_RPAGE_RPN1 /* PFN is for a single 4k page */
/*
 * We need to differentiate between an explicit huge page and a THP huge
 * page, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/*
 * Used to track whether the subpage group is valid if H_PAGE_COMBO is set.
 * This overloads H_PAGE_F_GIX and H_PAGE_F_SECOND.
 */
#define H_PAGE_COMBO_VALID	(H_PAGE_F_GIX | H_PAGE_F_SECOND)

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_F_SECOND | \
			 H_PAGE_F_GIX | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We support 16 fragments per PTE page of 64K size.
 */
#define H_PTE_FRAG_NR	16
/*
 * We use a 2K PTE page fragment and another 2K for storing
 * real_pte_t hash index.
 */
#define H_PTE_FRAG_SIZE_SHIFT  12
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
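
/*
 * Worked arithmetic (illustrative): with H_PTE_INDEX_SIZE = 8, a page
 * table holds 2^8 = 256 PTEs of 8 bytes each (2K), plus 256 hash-index
 * words of 8 bytes (another 2K), so each fragment is 1UL << 12 = 4K
 * and a 64K page holds 64K / 4K = 16 = H_PTE_FRAG_NR fragments.
 */
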
#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;
	rpte.hidx = 0;
	if (pte_val(pte) & H_PAGE_COMBO) {
		/*
		 * Make sure we order the hidx load against the H_PAGE_COMBO
		 * check. The store side ordering is done in __hash_page_4K.
		 */
		smp_rmb();
		hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
		rpte.hidx = *hidxp;
	}
	return rpte;
}
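
/*
 * Layout sketch (as implied by the comments above): the hidx word for a
 * PTE lives one full PTE-table's worth of entries past the PTE itself,
 * i.e. at ptep + PTRS_PER_PTE:
 *
 *	| 256 x pte_t (2K) | 256 x unsigned long hidx words (2K) |
 *	  ^ptep              ^ptep + PTRS_PER_PTE
 */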

#define HIDX_BITS(x, index)	(x << (index << 2))

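/*
 * Example (illustrative): each subpage index owns one 4-bit nibble of
 * the hidx word, so HIDX_BITS(0xfUL, 2) == 0xfUL << 8 == 0xf00UL.
 */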
static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	if ((pte_val(rpte.pte) & H_PAGE_COMBO))
		return (rpte.hidx >> (index << 2)) & 0xf;
	return (pte_val(rpte.pte) >> H_PAGE_F_GIX_SHIFT) & 0xf;
}
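
/*
 * Example (illustrative): for a combo page, subpage index 3 returns
 * (rpte.hidx >> 12) & 0xf, i.e. the fourth nibble of the hidx word.
 */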

/*
 * Commit the hidx and return the PTE bits that need to be modified.
 * The caller is expected to modify the PTE bits accordingly and
 * commit the PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index, unsigned long hidx)
{
	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);

	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(hidx, subpg_index);

	/*
	 * Anyone reading the PTE must ensure the hidx bits are read after
	 * reading the PTE, by using the read-side barrier smp_rmb().
	 * __real_pte() can be used for that.
	 */
	smp_wmb();

	/* No PTE bits to be modified, return 0x0UL */
	return 0x0UL;
}
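
/*
 * Usage sketch (hypothetical caller, modelled on the hash page-fault
 * path; names are illustrative):
 *
 *	rpte = __real_pte(__pte(old_pte), ptep);
 *	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, hidx);
 *	(the caller then commits new_pte to the PTE)
 */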

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M
 * page as well, since we only want one iteration in that case.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)
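
/*
 * Usage sketch (hypothetical, modelled on the hash flush path): the two
 * macros expand to nested loops, so the loop body goes between them:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		(invalidate the HPTE for this subpage)
 *	} pte_iterate_hashed_end();
 */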

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hindex is stored in the pgtable whose address is in the
	 * second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}
/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits: [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ].
 * We use one byte for each HPTE entry. With a 16MB hugepage and 64K
 * HPTEs we need 256 entries, and with 4K HPTEs we need 4096 entries.
 * Both will fit in a 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. These memory
 * locations are also used as normal page PTE pointers, so if we have
 * any pointers left around while we collapse a hugepage, we need to
 * make sure their _PAGE_PRESENT bit is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
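
/*
 * Example (illustrative): mark_hpte_slot_valid(arr, i, 5) stores
 * (5 << 1) | 0x1 = 0x0b in arr[i]; hpte_hash_index() then recovers 5
 * and hpte_valid() returns 1.
 */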

/*
 * For core kernel code, by design pmd_trans_huge is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths
 * are totally separated from the core VM paths, and they're
 * differentiated by VM_HUGETLB being set on vm_flags well before any
 * pmd_trans_huge could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP,
 * because for THP we also track the subpage details at the pmd level.
 * We don't do that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

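/*
 * Note (illustrative): hash__pmd_same() masks out _PAGE_HPTEFLAGS
 * because those bits are updated behind our back by the hash fault
 * path and do not change which translation the PMD describes.
 */
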
static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
					  unsigned long address, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */