arch/powerpc/include/asm/book3s/64/hash-64k.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE  8
#define H_PMD_INDEX_SIZE  10
#define H_PUD_INDEX_SIZE  10
#define H_PGD_INDEX_SIZE  8

/*
 * Each context is 512TB (2^49 bytes) in size. SLB misses for the
 * first/default context are handled in the hot path.
 */
#define MAX_EA_BITS_PER_CONTEXT 49

/*
 * A 64K-aligned address frees up a few of the lower bits of the RPN for
 * us; we steal them here. For more details look at pte_pfn()/pfn_pte().
 */
#define H_PAGE_COMBO _RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN _RPAGE_RPN1 /* PFN is for a single 4k page */
#define H_PAGE_BUSY _RPAGE_RPN44 /* software: PTE & hash are busy */
#define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */

/* Memory protection key bits. */
#define H_PTE_PKEY_BIT0 _RPAGE_RSV1
#define H_PTE_PKEY_BIT1 _RPAGE_RSV2
#define H_PTE_PKEY_BIT2 _RPAGE_RSV3
#define H_PTE_PKEY_BIT3 _RPAGE_RSV4
#define H_PTE_PKEY_BIT4 _RPAGE_RSV5

/*
 * We need to differentiate between an explicit huge page and a THP huge
 * page, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE H_PAGE_4K_PFN

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We use a 2K PTE page fragment and another 2K for storing the
 * real_pte_t hash index: 8 bytes for each PTE entry and another
 * 8 bytes for storing the slot details.
 */
#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3 + 1)
#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
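/*
 * Worked example (illustrative, assuming 64K base pages and the
 * H_PTE_INDEX_SIZE of 8 defined above): a fragment holds 2^8 PTEs of
 * 8 bytes (2K) plus 2K of hidx slots, so H_PTE_FRAG_SIZE_SHIFT is
 * 8 + 3 + 1 = 12, i.e. a 4K fragment, and H_PTE_FRAG_NR is
 * 64K >> 12 = 16 fragments per page.
 */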

#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash MMU, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;

	/*
	 * Ensure that we do not read the hidx before we read the PTE. The
	 * writer side is expected to finish writing the hidx first, followed
	 * by the PTE, using smp_wmb(); pte_set_hidx() ensures that.
	 */
	smp_rmb();

	hidxp = (unsigned long *)(ptep + offset);
	rpte.hidx = *hidxp;
	return rpte;
}

/*
 * Shift the hidx representation by one (modulo 16); i.e. hidx 0 is
 * represented as 1, 1 as 2, ..., and 0xf as 0. This convention lets us
 * represent an invalid hidx of 0xf with the 0x0 bit value. PTEs are
 * zeroed when allocated anyway, so we don't have to zero them again;
 * this saves on the initialization.
 */
#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL) /* shift forward by one */
#define HIDX_BITS(x, index) (x << (index << 2))
#define BITS_TO_HIDX(x, index) ((x >> (index << 2)) & 0xfUL)
#define INVALID_RPTE_HIDX 0x0UL
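/*
 * Illustrative sanity check of the convention (not in the original
 * source): a valid hidx 0x5 is stored as HIDX_SHIFT_BY_ONE(0x5) == 0x6
 * and read back as HIDX_UNSHIFT_BY_ONE(0x6) == 0x5, while the invalid
 * hidx 0xf round-trips through the all-zeroes pattern:
 * HIDX_SHIFT_BY_ONE(0xf) == 0x0 and HIDX_UNSHIFT_BY_ONE(0x0) == 0xf.
 */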

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
}

/*
 * Commit the hidx and return the PTE bits that need to be modified.
 * The caller is expected to modify the PTE bits accordingly and
 * commit the PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index,
					 unsigned long hidx, int offset)
{
	unsigned long *hidxp = (unsigned long *)(ptep + offset);

	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);

	/*
	 * Anyone reading the PTE must ensure the hidx bits are read after
	 * reading the PTE, by using the read-side barrier smp_rmb().
	 * __real_pte() can be used for that.
	 */
	smp_wmb();

	/* No PTE bits to be modified, return 0x0UL */
	return 0x0UL;
}
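/*
 * Rough usage sketch (illustrative only; new_pte, subpg_index and hidx
 * are hypothetical locals, and PTRS_PER_PTE is assumed as the usual
 * offset to the second half of the PTE page):
 *
 *	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, hidx, PTRS_PER_PTE);
 *	*ptep = __pte(new_pte);	// the PTE store is ordered after the
 *				// hidx store by the smp_wmb() above
 *
 * A reader then snapshots the pair with __real_pte(*ptep, ptep,
 * PTRS_PER_PTE) and extracts the slot with __rpte_to_hidx().
 */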

#define __rpte_to_pte(r) ((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64K, which happens to work for a 16M
 * page as well, since we want only one iteration in that case.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)
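/*
 * Illustrative usage sketch (the loop body is hypothetical; hpt_hash()
 * and ssize come from the hash MMU code): walk every valid hashed
 * subpage of a PTE, e.g. to invalidate its HPTE:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		// ... invalidate the HPTE for this subpage ...
 *	} pte_iterate_hashed_end();
 */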

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
#else
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#endif
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
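/*
 * For illustration (assuming the index sizes defined above and 8-byte
 * table entries): with THP or hugetlb enabled, H_PMD_TABLE_SIZE is
 * (8 << 10) + (8 << 10) = 16K, i.e. 8K of pmd_t entries plus 8K for
 * the stashed page-table pointers; without them it is just 8K.
 * H_PGD_TABLE_SIZE is 8 << 8 = 2K.
 */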

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The HPTE hash index is stored in the pgtable whose address is
	 * in the second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

/*
 * The Linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits: [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ].
 * We use one byte per HPTE entry. With a 16MB hugepage and 64K HPTEs we
 * need 256 entries, and with 4K HPTEs we need 4096 entries. Both fit in
 * a 4K pgtable_t.
 *
 * The top three bits are intentionally left zero. This memory location
 * is also used for normal page PTE pointers, so if we have any pointers
 * left around while we collapse a hugepage, we need to make sure the
 * _PAGE_PRESENT bit of those is zero when we look at them.
 */
/* The low bit of each slot-array byte is the valid bit (see layout above). */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

/* The remaining bits hold the [secondary | hidx] value for the subpage. */
static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
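/*
 * Worked example (illustrative): mark_hpte_slot_valid(arr, i, 0x5)
 * stores (0x5 << 1) | 0x1 == 0x0b, after which hpte_valid(arr, i)
 * returns 1 and hpte_hash_index(arr, i) returns 0x5 again.
 */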

/*
 * For core kernel code, by design pmd_trans_huge() is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths
 * are totally separated from the core VM paths, and they're
 * differentiated by VM_HUGETLB being set on vm_flags well before any
 * pmd_trans_huge() could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks in that
 * case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP,
 * because for THP we also track the subpage details at the pmd level.
 * We don't do that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}
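/*
 * For illustration (an assumption drawn from the comment above): a THP
 * PMD has both _PAGE_PTE and H_PAGE_THP_HUGE set, while an explicit
 * (hugetlb) huge page entry carries _PAGE_PTE without H_PAGE_THP_HUGE,
 * so hash__pmd_trans_huge() returns 0 for it.
 */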

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */