powerpc/mm: Move slb_addr_limit to early_init_mmu
arch/powerpc/include/asm/book3s/64/mmu.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_H_

#include <asm/page.h>

#ifndef __ASSEMBLY__
/*
 * Page size definition
 *
 * shift : is the "PAGE_SHIFT" value for that page size
 * sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *         directly to a slbmte "vsid" value
 * penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	union {
		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
		unsigned long	ap;	/* Ap encoding used by PowerISA 3.0 */
	};
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

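/*
 * Illustrative sketch (not part of this header; helper name hypothetical):
 * the sllp field is meant to be or'ed directly into an SLB VSID word when
 * building slbmte operands. SLB_VSID_USER comes from
 * <asm/book3s/64/mmu-hash.h>.
 */
#if 0	/* example only, not compiled */
static inline unsigned long example_user_vsid_flags(int psize)
{
	/* base user segment flags plus the exact L||LP mask for this size */
	return SLB_VSID_USER | mmu_psize_defs[psize].sllp;
}
#endif
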
/*
 * For BOOK3S 64 with 4K and 64K linux page sizes
 * we want to use pointers, because the page table
 * actually stores the pfn
 */
typedef pte_t *pgtable_t;

#endif /* __ASSEMBLY__ */

/*
 * If we store section details in page->flags, we can't increase
 * MAX_PHYSMEM_BITS: if we instead increase SECTIONS_WIDTH, we no longer
 * store node details in page->flags, and page_to_nid() has to do a
 * page->section->node lookup. Hence only increase it for VMEMMAP, and
 * further require SPARSEMEM_EXTREME to reduce the memory cost of the
 * large number of sections.
 * 51 bits is the max physical real address on POWER9.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
	defined(CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS	51
#else
#define MAX_PHYSMEM_BITS	46
#endif

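/*
 * Worked example (assuming the SECTION_SIZE_BITS value of 24, i.e. 16MB
 * sections, from <asm/sparsemem.h>): 51 bits addresses 2PB and gives
 * 1 << (51 - 24) = 128M possible sections, which is why SPARSEMEM_EXTREME
 * (dynamically allocated section roots) is required there; 46 bits is
 * 64TB and only 1 << (46 - 24) = 4M sections.
 */
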
/* 64-bit classic hash table MMU */
#include <asm/book3s/64/mmu-hash.h>

#ifndef __ASSEMBLY__
/*
 * ISA 3.0 partition and process table entry format
 */
struct prtb_entry {
	__be64 prtb0;
	__be64 prtb1;
};
extern struct prtb_entry *process_tb;

struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
extern struct patb_entry *partition_tb;

/* Bits in patb0 field */
#define PATB_HR		(1UL << 63)
#define RPDB_MASK	0x0fffffffffffff00UL
#define RPDB_SHIFT	(1UL << 8)
#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f		/* root page dir. size field */

/* Bits in patb1 field */
#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
#define PRTS_MASK	0x1f		/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL

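/*
 * Sketch (not part of this header) of how the split radix tree size field
 * is assembled, modelled on radix__get_tree_size() in the radix MMU code:
 * for a 52-bit tree, RTS = 52 - 31 = 21 = 0b10101, with the low three bits
 * placed at RTS2_SHIFT and the high two bits at RTS1_SHIFT.
 */
#if 0	/* example only, not compiled */
static inline unsigned long example_rts_field(void)
{
	unsigned long rts_field;

	rts_field  = 0x5UL << RTS2_SHIFT;	/* RTS bits 0-2 (0b101) */
	rts_field |= 0x2UL << RTS1_SHIFT;	/* RTS bits 3-4 (0b10) */
	return rts_field;
}
#endif
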
/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from */
extern unsigned int mmu_base_pid;

#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)

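/*
 * Worked size example: each prtb_entry is two __be64s, i.e. 2^4 = 16
 * bytes, which is where the "+ 4" in PRTB_SIZE_SHIFT comes from. With,
 * say, 20 PID bits, the process table has 1 << 20 entries and occupies
 * 1 << 24 bytes = 16MB.
 */
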
/*
 * Power9 currently only supports a 64K partition table size.
 */
#define PATB_SIZE_SHIFT	16

typedef unsigned long mm_context_id_t;
struct spinlock;

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8

/*
 * One bit per slice. The lower slices cover 256MB segments up to the
 * 4GB boundary, which gets us 16 low slices. Beyond that we track
 * slices in 1TB units.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

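/*
 * Sketch (helper name hypothetical) of how an address maps onto these
 * bitmaps, assuming the usual SLICE_LOW_SHIFT of 28 (256MB) and
 * SLICE_HIGH_SHIFT of 40 (1TB) from the slice headers.
 */
#if 0	/* example only, not compiled */
static inline bool example_slice_is_set(const struct slice_mask *mask,
					unsigned long addr)
{
	if (addr < (1ul << 32))	/* low range: one bit per 256MB slice */
		return mask->low_slices & (1ull << (addr >> 28));
	/* high range: one bit per 1TB slice */
	return test_bit(addr >> 40, mask->high_slices);
}
#endif
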
typedef struct {
	union {
		/*
		 * We use id as the PIDR content for radix. On hash we can use
		 * more than one id. The extended ids are used when we start
		 * having addresses above 512TB. We allocate one extended id
		 * for each 512TB. The new id is then used together with the
		 * 49-bit EA to build a new VA. We always use ESID_BITS_1T_MASK
		 * bits from the EA and the new context ids to build the new
		 * VAs.
		 */
		mm_context_id_t id;
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
	};
	u16 user_psize;		/* page size index */

	/* Number of bits in the mm_cpumask */
	atomic_t active_cpus;

	/* Number of users of the external (Nest) MMU */
	atomic_t copros;

	/* NPU NMMU context */
	struct npu_context *npu_context;

	/* SLB page size encodings */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
# ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
# endif
	struct slice_mask mask_4k;
# ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
# endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
	/*
	 * pagetable fragment support
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key.
	 * bit set   -> key allocated
	 * bit unset -> key available for allocation
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey;	/* key holding execute-only protection */
#endif
} mm_context_t;

static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
	return ctx->user_psize;
}

static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
{
	ctx->user_psize = user_psize;
}

static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
	return ctx->low_slices_psize;
}

static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
	return ctx->high_slices_psize;
}

static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
	return ctx->slb_addr_limit;
}

static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
	ctx->slb_addr_limit = limit;
}

#ifdef CONFIG_PPC_64K_PAGES
static inline struct slice_mask *mm_ctx_slice_mask_64k(mm_context_t *ctx)
{
	return &ctx->mask_64k;
}
#endif

static inline struct slice_mask *mm_ctx_slice_mask_4k(mm_context_t *ctx)
{
	return &ctx->mask_4k;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline struct slice_mask *mm_ctx_slice_mask_16m(mm_context_t *ctx)
{
	return &ctx->mask_16m;
}

static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx)
{
	return &ctx->mask_16g;
}
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
{
	return &ctx->spt;
}
#endif

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;

/* MMU initialization */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
static inline void early_init_mmu(void)
{
	if (radix_enabled())
		return radix__early_init_mmu();
	return hash__early_init_mmu();
}
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);
static inline void early_init_mmu_secondary(void)
{
	if (radix_enabled())
		return radix__early_init_mmu_secondary();
	return hash__early_init_mmu_secondary();
}

extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					     phys_addr_t first_memblock_size);
extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
{
	if (early_radix_enabled())
		return radix__setup_initial_memory_limit(first_memblock_base,
							 first_memblock_size);
	return hash__setup_initial_memory_limit(first_memblock_base,
						first_memblock_size);
}

extern int (*register_process_table)(unsigned long base, unsigned long page_size,
				     unsigned long tbl_size);

#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
static inline void radix_init_pseries(void) { }
#endif

static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	if (likely(index < ARRAY_SIZE(ctx->extended_id)))
		return ctx->extended_id[index];

	/* should never happen */
	WARN_ON(1);
	return 0;
}

static inline unsigned long get_user_vsid(mm_context_t *ctx,
					  unsigned long ea, int ssize)
{
	unsigned long context = get_user_context(ctx, ea);

	return get_vsid(context, ea, ssize);
}

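/*
 * Worked example: with MAX_EA_BITS_PER_CONTEXT of 49 (one context per
 * 512TB, matching the extended_id comment above), an EA just above 512TB
 * gives index 1, so get_user_context() returns extended_id[1]; any EA
 * below 512TB gives index 0, which aliases the primary id.
 */
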
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */