/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

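	/* EA bits 60-63 select the region: 0xc is the kernel linear
	 * mapping at PAGE_OFFSET, other kernel regions (vmalloc/ioremap)
	 * compare higher, and user addresses compare lower.  The 256M
	 * ESID is just EA >> SID_SHIFT (28).
	 */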
	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it. */

	/* Check whether we hit the linear mapping or the
	 * vmalloc/ioremap kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
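	/* (The instruction above is rewritten at boot with SLB_VSID_KERNEL
	 * plus the SLLP encoding chosen for the linear mapping's page
	 * size; see slb_initialize() in slb.c.)
	 */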
BEGIN_FTR_SECTION
	b	slb_finish_load
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
	 * will be patched by the kernel at boot
	 */
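	/* If the CPU cannot do cache-inhibited large pages
	 * (CPU_FTR_CI_LARGE_PAGE clear), ioremap mappings may use a
	 * different page size from vmalloc, so check which part of the
	 * region we are in and pick the vmalloc SLLP from the PACA when
	 * appropriate.  Otherwise both share the value patched into
	 * slb_miss_kernel_load_io below.
	 */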
BEGIN_FTR_SECTION
	/* check whether this is in vmalloc or ioremap space */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
BEGIN_FTR_SECTION
	b	slb_finish_load
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:	/* user address: proto-VSID = context << USER_ESID_BITS | ESID.
	 * First check if the address is within the boundaries of the
	 * user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */

	/* When using slices, we extract the psize from the slice bitmaps
	 * and then look up the SLLP encoding in the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array.  We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
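	/* Roughly, in C (a sketch; the real lookup is the assembly below):
	 *
	 *	if (esid < SLICE_LOW_TOP >> SID_SHIFT)
	 *		psize = (paca->context.low_slices_psize
	 *			 >> (esid * 4)) & 0xf;
	 *	else
	 *		psize = (paca->context.high_slices_psize
	 *			 >> ((esid >> (SLICE_HIGH_SHIFT - SID_SHIFT)) * 4)) & 0xf;
	 *	r11 = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */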
#ifdef CONFIG_PPC_MM_SLICES
	cmpldi	r10,16

	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r10,2
	blt	5f
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
	andi.	r11,r11,0x3c

5:	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

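	/* On CPUs with 1T segments, user addresses at or above 1T
	 * (256M ESID >= 0x1000) get 1T segment entries; addresses below
	 * 1T keep 256M segments.
	 */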
	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	rldimi	r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
	bge	slb_finish_load_1T
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables.  This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD.  In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9,256M)
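	/* ASM_VSID_SCRAMBLE leaves the scrambled VSID in r10: roughly
	 * vsid = (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M
	 * (see asm/mmu-hash64.h for the exact macro).
	 */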
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

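	/* Round-robin victim selection: paca->stab_rr holds the last slot
	 * used; wrapping back to SLB_NUM_BOLTED keeps the bolted entries
	 * from ever being replaced.
	 */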
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

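	/* slb_cache[] remembers which user ESIDs are in the SLB so that
	 * switch_slb() can invalidate just those entries on context
	 * switch; once the cache overflows, the whole SLB is flushed
	 * instead.
	 */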
	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (kernel or user) and return.
 * We assume legacy iSeries will never have 1T segments.
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,40-28		/* get 1T ESID */
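	/* r10 arrives as a 256M-style proto-VSID; shifting right by
	 * SID_SHIFT_1T - SID_SHIFT (40 - 28 = 12) turns it into the 1T
	 * proto-VSID before the 1T scramble below.
	 */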
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b