[PATCH] ppc64: support 64k pages
[linux-2.6-block.git] / include / asm-ppc64 / mmu.h
/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/ppc_asm.h>	/* for ASM_CONST */
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V		0x80
#define STE_ESID_KS		0x20
#define STE_ESID_KP		0x10
#define STE_ESID_N		0x08

#define STE_VSID_SHIFT		12

/* Location of cpu0's segment table */
#define STAB0_PAGE		0x6
#define STAB0_PHYS_ADDR		(STAB0_PAGE<<12)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */

#ifndef __ASSEMBLY__

typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 * shift : the "PAGE_SHIFT" value for that page size
 * sllp  : a bit mask with the value of SLB L || LP to be or'ed
 *         directly into an slbmte "vsid" value
 * penc  : the HPTE encoding value for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
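
/*
 * Illustrative only, not from the original header: a plausible
 * mmu_psize_defs[] entry for 64K pages.  The real table is filled in by
 * the arch setup code at boot; the exact values below are assumptions:
 *
 *	[MMU_PAGE_64K] = {
 *		.shift	= 16,				// 64K == 2^16
 *		.sllp	= SLB_VSID_L | SLB_VSID_LP_01,	// SLB L/LP bits
 *		.penc	= 1,				// HPTE LP encoding
 *		.avpnm	= 0,
 *		.tlbiel	= 1,
 *	},
 */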

#endif /* __ASSEMBLY__ */

/*
 * The kernel uses the constants below to index into the page size array.
 * Using fixed constants for this purpose gives better performance in the
 * low-level hash refill handlers.
 *
 * An unsupported page size has its "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 */

#define MMU_PAGE_4K		0	/* 4K */
#define MMU_PAGE_64K		1	/* 64K */
#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
#define MMU_PAGE_1M		3	/* 1M */
#define MMU_PAGE_16M		4	/* 16M */
#define MMU_PAGE_16G		5	/* 16G */
#define MMU_PAGE_COUNT		6

#ifndef __ASSEMBLY__

/*
 * The current system page sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The page size index of the huge pages for use by hugetlbfs
 */
extern int mmu_huge_psize;
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize)
{
	unsigned long v;

	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
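
/*
 * Usage sketch (illustrative, not from the original header): an HPTE
 * insertion path would typically combine the two encoders with its own
 * flag bits, roughly:
 *
 *	hpte_t hpte;
 *	hpte.v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
 *	hpte.r = hpte_encode_r(pa, psize) | rflags;
 *
 * where va, pa, psize, vflags and rflags correspond to the parameters
 * taken by the *_hpte_insert() functions declared below.
 */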

/*
 * This hashes a virtual address; only 256MB segments are handled for now
 */
static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
{
	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
}
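
/*
 * Worked example (illustrative): for a 4K page (shift == 12) at
 * va == 0x1230012000, the segment bits are (va >> 28) == 0x123 and the
 * in-segment page index is (va & 0x0fffffff) >> 12 == 0x12, so
 * hpt_hash(va, 12) == 0x123 ^ 0x12 == 0x131.
 */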

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local);
struct mm_struct;
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local);

extern void htab_finish_init(void);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long mode,
			     int psize);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     unsigned long rflags,
				     unsigned long vflags, int psize);

extern long native_hpte_insert(unsigned long hpte_group,
			       unsigned long va, unsigned long prpn,
			       unsigned long rflags,
			       unsigned long vflags, int psize);

extern long iSeries_hpte_insert(unsigned long hpte_group,
				unsigned long va, unsigned long prpn,
				unsigned long rflags,
				unsigned long vflags, int psize);

extern void stabs_alloc(void);
extern void slb_initialize(void);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID". For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << USER_ESID_BITS) | (esid & ((1 << USER_ESID_BITS) - 1))
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER is a 28-bit prime (see below)
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for USER_ESID_BITS (16) significant bits of ESID and
 * CONTEXT_BITS (19) bits of context for user addresses, i.e. 16T
 * (44 bits) of address space for up to 512K contexts (although the
 * page table structure and context allocation will need changes to
 * take advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */
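/*
 * Worked example (illustrative): for a user access at ea == 0x10000000
 * in context 1, esid == ea >> SID_SHIFT == 0x1, so the proto-VSID is
 * (1 << USER_ESID_BITS) | 0x1 == 0x10001, which is then fed through the
 * scramble below.
 */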

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	19
#define USER_ESID_BITS	16

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if rt >=	\
	 * 2^36-1, then rt+1 has the 2^36 bit set.  So, if rt+1 has	\
	 * the bit clear, rt already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of rt+1.  So in all	\
	 * cases the answer is the low 36 bits of (rt + ((rt+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx
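
/*
 * Example use (illustrative): with the proto-VSID in r3 and r4 free as
 * scratch, the asm would look roughly like:
 *
 *	ASM_VSID_SCRAMBLE(r3, r4)
 *	clrldi	r3,r3,(64-VSID_BITS)	# keep only the low 36 bits
 *
 * The final clrldi is the caller-side masking step mentioned above.
 */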

#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	u16 low_htlb_areas, high_htlb_areas;
#endif
} mm_context_t;

static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}
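
/*
 * Sanity check (illustrative): for a small proto-VSID the folding steps
 * are no-ops, so e.g. vsid_scramble(1) == VSID_MULTIPLIER == 200730139,
 * matching the plain modulo formula since 200730139 < 2^36-1.
 */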

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}

#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_MMU_H_ */