powerpc/mm: Add helpers for accessing hash translation related variables
[linux-block.git] / arch / powerpc / include / asm / nohash / 32 / mmu-8xx.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
31202345
DG
2#ifndef _ASM_POWERPC_MMU_8XX_H_
3#define _ASM_POWERPC_MMU_8XX_H_
4/*
5 * PPC8xx support
6 */
7
8/* Control/status registers for the MPC8xx.
9 * A write operation to these registers causes serialized access.
10 * During software tablewalk, the registers used perform mask/shift-add
11 * operations when written/read. A TLB entry is created when the Mx_RPN
12 * is written, and the contents of several registers are used to
13 * create the entry.
14 */
15#define SPRN_MI_CTR 784 /* Instruction TLB control register */
16#define MI_GPM 0x80000000 /* Set domain manager mode */
17#define MI_PPM 0x40000000 /* Set subpage protection */
18#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
19#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
20#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
21#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
22#define MI_RESETVAL 0x00000000 /* Value of register at reset */
23
24/* These are the Ks and Kp from the PowerPC books. For proper operation,
25 * Ks = 0, Kp = 1.
26 */
27#define SPRN_MI_AP 786
28#define MI_Ks 0x80000000 /* Should not be set */
29#define MI_Kp 0x40000000 /* Should always be set */
30
/*
 * Every page's PP data bits are set to either 001 or 011 by copying
 * _PAGE_EXEC into bit 21 (the middle bit) in the ITLBmiss handler,
 * meaning respectively NA for All, or X for Supervisor with no access
 * for User. The APG then selects whether accesses follow the Page rules
 * or the "all Supervisor" rules (Access to all).
 * Hence 2 APG groups are defined; the lsb is _PMD_USER:
 * 0 => Kernel => 01 (all accesses performed according to page definition)
 * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
 * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
 */
#define MI_APG_INIT	0x4fffffff

/*
 * 0 => Kernel => 01 (all accesses performed according to page definition)
 * 1 => User => 10 (all accesses performed according to swapped page definition)
 * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
 */
#define MI_APG_KUEP	0x6fffffff
/* The effective page number register. When read, contains the information
 * about the last instruction TLB miss. When MI_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MI_EPN	787
#define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MI_EVALID	0x00000200	/* Entry is valid */
#define MI_ASIDMASK	0x0000000f	/* ASID match value */
				/* Reset value is undefined */

/* A "level 1" (a.k.a. "segment") register for the instruction TLB:
 * it holds the bits loaded into the TLB entry when MI_RPN is written.
 */
#define SPRN_MI_TWC	789
#define MI_APG		0x000001e0	/* Access protection group (0) */
#define MI_GUARDED	0x00000010	/* Guarded storage */
#define MI_PSMASK	0x0000000c	/* Mask of page size bits */
#define MI_PS8MEG	0x0000000c	/* 8M page size */
#define MI_PS512K	0x00000004	/* 512K page size */
#define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MI_SVALID	0x00000001	/* Segment entry is valid */
				/* Reset value is undefined */

/* Real page number, defined by the pte. Writing this register creates
 * an instruction TLB entry, combining it with additional information
 * from the MI_EPN and MI_TWC registers.
 */
#define SPRN_MI_RPN	790
#define MI_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

/* RPN value used to map kernel memory with large virtual pages during
 * boot initialization: real page number 0, large page size, shared page,
 * cache enabled, valid, with all subpages valid and write access.
 */
#define MI_BOOTINIT	0x000001fd
#define SPRN_MD_CTR	792	/* Data TLB control register */
#define MD_GPM		0x80000000	/* Set domain manager mode */
#define MD_PPM		0x40000000	/* Set subpage protection */
#define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
#define MD_WTDEF	0x10000000	/* Set writethrough when MMU dis */
#define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
#define MD_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
#define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */
#define MD_RESETVAL	0x04000000	/* Value of register at reset */

#define SPRN_M_CASID	793	/* Address space ID (context) to match */
#define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */


/* The Ks and Kp protection-key bits from the PowerPC books.
 * For proper operation, Ks = 0, Kp = 1.
 */
#define SPRN_MD_AP	794
#define MD_Ks		0x80000000	/* Should not be set */
#define MD_Kp		0x40000000	/* Should always be set */
/*
 * Every page's PP data bits are set to 000, 011 or 001, meaning
 * respectively RW for Supervisor and no access for User, RO for
 * Supervisor and no access for User, or NA for All.
 * The APG then selects whether accesses follow the Page rules or the
 * "all Supervisor" rules (Access to all).
 * Hence 2 APG groups are defined; the lsb is _PMD_USER:
 * 0 => Kernel => 01 (all accesses performed according to page definition)
 * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
 * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
 */
#define MD_APG_INIT	0x4fffffff

/*
 * 0 => No user => 01 (all accesses performed according to page definition)
 * 1 => User => 10 (all accesses performed according to swapped page definition)
 * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
 */
#define MD_APG_KUAP	0x6fffffff
/* The effective page number register. When read, contains the information
 * about the last data TLB miss. When MD_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MD_EPN	795
#define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MD_EVALID	0x00000200	/* Entry is valid */
#define MD_ASIDMASK	0x0000000f	/* ASID match value */
				/* Reset value is undefined */
/* Pointer to the base address of the first level page table.
 * During a software tablewalk, reading this register provides the
 * address of the entry associated with MD_EPN.
 */
#define SPRN_M_TWB	796
#define M_L1TB		0xfffff000	/* Level 1 table base address */
#define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
				/* Reset value is undefined */

/* A "level 1" (a.k.a. "segment") register for the data TLB: it holds
 * the bits loaded into the TLB entry when MD_RPN is written. It also
 * provides the hardware assist for finding the PTE address during a
 * software tablewalk.
 */
#define SPRN_MD_TWC	797
#define MD_L2TB		0xfffff000	/* Level 2 table base address */
#define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
#define MD_APG		0x000001e0	/* Access protection group (0) */
#define MD_GUARDED	0x00000010	/* Guarded storage */
#define MD_PSMASK	0x0000000c	/* Mask of page size bits */
#define MD_PS8MEG	0x0000000c	/* 8M page size */
#define MD_PS512K	0x00000004	/* 512K page size */
#define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MD_WT		0x00000002	/* Use writethrough page attribute */
#define MD_SVALID	0x00000001	/* Segment entry is valid */
				/* Reset value is undefined */


/* Real page number, defined by the pte. Writing this register creates
 * a data TLB entry, combining it with additional information from the
 * MD_EPN and MD_TWC registers.
 */
#define SPRN_MD_RPN	798
#define MD_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

/* Temporary storage register, usable to save a processor working
 * register during a tablewalk.
 */
#define SPRN_M_TW	799
#ifdef CONFIG_PPC_MM_SLICES
#include <asm/nohash/32/slice.h>
/* One nibble of psize per slice covering the low half of the address space */
#define SLICE_ARRAY_SIZE	(1 << (32 - SLICE_LOW_SHIFT - 1))
#define LOW_SLICE_ARRAY_SZ	SLICE_ARRAY_SIZE
#endif
31202345 187#ifndef __ASSEMBLY__
5709f7cf
NP
188struct slice_mask {
189 u64 low_slices;
190 DECLARE_BITMAP(high_slices, 0);
191};
/* Per-mm MMU context for the 8xx */
typedef struct {
	unsigned int id;		/* context (ASID) number */
	unsigned int active;		/* count of CPUs using this context */
	unsigned long vdso_base;
#ifdef CONFIG_PPC_MM_SLICES
	u16 user_psize;			/* page size index */
	unsigned char low_slices_psize[SLICE_ARRAY_SIZE];
	unsigned char high_slices_psize[0];	/* no high slices on 8xx */
	unsigned long slb_addr_limit;
	struct slice_mask mask_base_psize; /* 4k or 16k */
# ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_512k;
	struct slice_mask mask_8m;
# endif
#endif
	void *pte_frag;			/* current PTE fragment page */
} mm_context_t;

#ifdef CONFIG_PPC_MM_SLICES
/*
 * Accessor helpers hiding the mm_context_t slice-field layout from
 * generic slice code.
 */
static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
	return ctx->user_psize;
}

static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
{
	ctx->user_psize = user_psize;
}

static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
	return ctx->low_slices_psize;
}

static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
	return ctx->high_slices_psize;
}

static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
	return ctx->slb_addr_limit;
}

static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
	ctx->slb_addr_limit = limit;
}

static inline struct slice_mask *mm_ctx_slice_mask_base(mm_context_t *ctx)
{
	return &ctx->mask_base_psize;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline struct slice_mask *mm_ctx_slice_mask_512k(mm_context_t *ctx)
{
	return &ctx->mask_512k;
}

static inline struct slice_mask *mm_ctx_slice_mask_8m(mm_context_t *ctx)
{
	return &ctx->mask_8m;
}
#endif
#endif /* CONFIG_PPC_MM_SLICES */

#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))

/* Page size definitions, common between 32 and 64-bit
 *
 * shift : is the "PAGE_SHIFT" value for that page size
 * penc : is the pte encoding mask
 *
 */
struct mmu_psize_def {
	unsigned int shift;	/* number of bits */
	unsigned int enc;	/* PTE encoding */
	unsigned int ind;	/* Corresponding indirect page size shift */
	unsigned int flags;
#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
};

277
278extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
279
280static inline int shift_to_mmu_psize(unsigned int shift)
281{
282 int psize;
283
284 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
285 if (mmu_psize_defs[psize].shift == shift)
286 return psize;
287 return -1;
288}
289
290static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
291{
292 if (mmu_psize_defs[mmu_psize].shift)
293 return mmu_psize_defs[mmu_psize].shift;
294 BUG();
295}
296
1a210878 297/* patch sites */
d5f17ee9 298extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
1a210878
CL
299extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
300extern s32 patch__fixupdar_linmem_top;
d5f17ee9 301extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
1a210878 302
709cf19c
CL
303extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
304extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
305extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
306
31202345
DG
307#endif /* !__ASSEMBLY__ */
308
7ee5cf6b 309#if defined(CONFIG_PPC_4K_PAGES)
25d21ad6 310#define mmu_virtual_psize MMU_PAGE_4K
7ee5cf6b 311#elif defined(CONFIG_PPC_16K_PAGES)
86c3b16e 312#define mmu_virtual_psize MMU_PAGE_16K
55c8fc3f
CL
313#define PTE_FRAG_NR 4
314#define PTE_FRAG_SIZE_SHIFT 12
315#define PTE_FRAG_SIZE (1UL << 12)
86c3b16e
LC
316#else
317#error "Unsupported PAGE_SIZE"
318#endif
319
25d21ad6
BH
320#define mmu_linear_psize MMU_PAGE_8M
321
31202345 322#endif /* _ASM_POWERPC_MMU_8XX_H_ */