Merge tag 'mm-stable-2024-05-24-11-49' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-block.git] / arch / powerpc / include / asm / book3s / 64 / hash-4k.h
CommitLineData
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H

/*
 * Page-table geometry for the book3s/64 hash MMU with a 4K base page size.
 * Every level stores 8-byte entries, so a level with index size N occupies
 * 8B << N bytes and maps 2^N entries of the level below.
 */
#define H_PTE_INDEX_SIZE  9 // size: 8B << 9 = 4KB, maps: 2^9 x 4KB = 2MB
#define H_PMD_INDEX_SIZE  7 // size: 8B << 7 = 1KB, maps: 2^7 x 2MB = 256MB
#define H_PUD_INDEX_SIZE  9 // size: 8B << 9 = 4KB, maps: 2^9 x 256MB = 128GB
#define H_PGD_INDEX_SIZE  9 // size: 8B << 9 = 4KB, maps: 2^9 x 128GB = 64TB

/*
 * Each context is 512TB. But on 4k we restrict our max TASK size to 64TB.
 * Hence also limit max EA bits to 64TB.
 */
#define MAX_EA_BITS_PER_CONTEXT		46

/*
 * Our page table limits us to 64TB. For 64TB physical memory, we only need
 * 64GB of vmemmap space. To better support sparse memory layout, we use
 * 61TB linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmemmap.
 */
#define REGION_SHIFT		(40)
#define H_KERN_MAP_SIZE		(ASM_CONST(1) << REGION_SHIFT)

/*
 * Limits the linear mapping range: 2^46 = 64TB of addressable physical
 * memory with this configuration.
 */
#define H_MAX_PHYSMEM_BITS	46

/*
 * Define the address range of the kernel non-linear virtual area (61TB).
 */
#define H_KERN_VIRT_START	ASM_CONST(0xc0003d0000000000)

#ifndef __ASSEMBLY__
/* Bytes occupied by a full table at each level: entry size << index size. */
#define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << H_PMD_INDEX_SIZE)
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << H_PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << H_PGD_INDEX_SIZE)

/*
 * Software PTE bits the hash MMU uses to remember which hardware hash
 * table (HPTE) slot backs a Linux PTE.
 */
#define H_PAGE_F_GIX_SHIFT	_PAGE_PA_MAX
#define H_PAGE_F_SECOND		_RPAGE_PKEY_BIT0 /* HPTE is in 2ndary HPTEG */
#define H_PAGE_F_GIX		(_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41)
#define H_PAGE_BUSY		_RPAGE_RSV1
#define H_PAGE_HASHPTE		_RPAGE_PKEY_BIT4

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \
			 H_PAGE_F_SECOND | H_PAGE_F_GIX)
/*
 * Not supported by 4k linux page size; defined as 0 so common code can
 * test these bits unconditionally.
 */
#define H_PAGE_4K_PFN	0x0
#define H_PAGE_THP_HUGE 0x0
#define H_PAGE_COMBO	0x0

/*
 * Page-table fragments: several sub-page-size tables are packed into one
 * backing page. 8 bytes per each pte entry, hence the "+ 3" shift.
 */
#define H_PTE_FRAG_SIZE_SHIFT	(H_PTE_INDEX_SIZE + 3)
#define H_PTE_FRAG_NR		(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
#define H_PMD_FRAG_SIZE_SHIFT	(H_PMD_INDEX_SIZE + 3)
#define H_PMD_FRAG_NR		(PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)

/*
 * memory key bits, only 8 keys supported: the two high key bits are
 * forced to 0, leaving three usable PTE pkey bits.
 */
#define H_PTE_PKEY_BIT4	0
#define H_PTE_PKEY_BIT3	0
#define H_PTE_PKEY_BIT2	_RPAGE_PKEY_BIT3
#define H_PTE_PKEY_BIT1	_RPAGE_PKEY_BIT2
#define H_PTE_PKEY_BIT0	_RPAGE_PKEY_BIT1

/*
 * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range().
 */
#define remap_4k_pfn(vma, addr, pfn, prot)	\
	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))

#ifdef CONFIG_HUGETLB_PAGE
/*
 * A hugepd is a directory pointer rather than a leaf PTE: it must be
 * present, have _PAGE_PTE clear, and carry a non-zero hugepd shift.
 */
static inline int hash__hugepd_ok(hugepd_t hpd)
{
	unsigned long hpdval = hpd_val(hpd);

	return !(hpdval & _PAGE_PTE) && (hpdval & _PAGE_PRESENT) &&
	       (hpdval & HUGEPD_SHIFT_MASK) != 0;
}
#endif

91
59aa31fd
RP
92/*
93 * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
94 * a matter of returning the PTE bits that need to be modified. On 64K PTE,
95 * things are a little more involved and hence needs many more parameters to
96 * accomplish the same. However we want to abstract this out from the caller by
97 * keeping the prototype consistent across the two formats.
98 */
99static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
ff31e105
AK
100 unsigned int subpg_index, unsigned long hidx,
101 int offset)
59aa31fd
RP
102{
103 return (hidx << H_PAGE_F_GIX_SHIFT) &
104 (H_PAGE_F_SECOND | H_PAGE_F_GIX);
105}
106
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * THP is not supported with a 4K base page size on the hash MMU: the
 * inline accessors below are stubs that must never be reached and BUG()
 * if called. The extern declarations are the shared hash-MMU THP entry
 * points implemented elsewhere.
 */

/* No per-PMD HPTE slot array exists on 4K hash. */
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	BUG();
	return NULL;
}

static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	BUG();
	return 0;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	BUG();
	return 0;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	BUG();
}

/* A PMD never maps a transparent huge page on 4K hash. */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return 0;
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	BUG();
	return pmd;
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif

/* devmap PMDs are not supported on 4K hash; must never be called. */
static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
{
	BUG();
	return pmd;
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */