/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *	     VSID will be stored
 *	rx = scratch register (clobbered)
 *	rf = flags
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
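/*
 * Why no divide is needed for the modulus: VSID_MODULUS is
 * 2^VSID_BITS - 1, and 2^VSID_BITS == 1 (mod 2^VSID_BITS - 1), so the
 * high bits of the product can simply be folded into the low bits.
 * The addi/srdi/add triple then handles the one carry case where the
 * folded sum reaches the modulus.  Worked with a toy VSID_BITS = 4
 * (modulus 15): 0x5A -> 0x5 + 0xA = 0xF, then (0xF + 1) >> 4 = 1 is
 * folded in and the low 4 bits give 0, which is indeed 0x5A mod 15.
 */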
#define ASM_VSID_SCRAMBLE(rt, rx, rf, size)				\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
/*									\
 * PowerMac takes SLB faults before the feature fixup has run, so	\
 * make the 65-bit variant the default side of the feature fixup.	\
 */									\
BEGIN_MMU_FTR_SECTION							\
	srdi	rx,rt,VSID_BITS_65_##size;				\
	clrldi	rt,rt,(64-VSID_BITS_65_##size);				\
	add	rt,rt,rx;						\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_65_##size;				\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_65_##size)); \
MMU_FTR_SECTION_ELSE							\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_##size)); \
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)

/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 *	r3 is preserved.
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate)
	/*
	 * Check if the address falls within the range of the first context,
	 * or if we may need to handle multiple contexts.  For the first
	 * context we allocate the SLB entry via the fast path below; for
	 * larger addresses we branch out to C code and see if additional
	 * contexts have been allocated.
	 * The test here is:
	 *	(ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT)
	 */
	rldicr. r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4)
	bne-	8f
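	/*
	 * The rldicr. above rotates the four region bits out of the top
	 * and keeps only the EA bits at or above MAX_EA_BITS_PER_CONTEXT,
	 * so cr0 is non-zero exactly when the EA lies beyond the first
	 * context.
	 */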

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET

BEGIN_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T
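	/*
	 * If MMU_FTR_1T_SEGMENT is clear, the branch inside the feature
	 * section survives the boot-time fixup and we take the 256MB
	 * path; otherwise it is patched out and we fall through to the
	 * 1T path.
	 */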

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	cmpldi	cr0,r9,0xf
	bne	1f
	/* Check virtual memmap region. To be patched at kernel boot */
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/*
	 * r10 contains the ESID, which is the original faulting EA shifted
	 * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28)
	 * which is 0xd00038000. That can't be used as an immediate, even if we
	 * ignored the 0xd, so we have to load it into a register, and we only
	 * have one register free. So we must load all of (H_VMALLOC_END >> 28)
	 * into a register and compare ESID against that.
	 */
	lis	r11,(H_VMALLOC_END >> 32)@h	// r11 = 0xffffffffd0000000
	ori	r11,r11,(H_VMALLOC_END >> 32)@l	// r11 = 0xffffffffd0003800
	// Rotate left 4, then mask with 0xffffffff0
	rldic	r11,r11,4,28			// r11 = 0xd00038000
	cmpld	r10,r11				// if r10 >= r11
	bge	5f				//   goto io_mapping

	/*
	 * vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines.
	 */
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
6:
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET

BEGIN_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T

0:	/*
	 * For userspace addresses, make sure this is region 0.
	 */
	cmpdi	r9, 0
	bne-	8f
	/*
	 * For user space, make sure we are within the allowed limit.
	 */
	ld	r11,PACA_SLB_ADDR_LIMIT(r13)
	cmpld	r3,r11
	bge-	8f

	/* When using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page sizes.
	 */
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 has the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes:
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes:
	 * r9 is get_paca()->context.low_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,1		/* index */
	addi	r9,r11,PACALOWSLICESPSIZE
	lbzx	r9,r13,r9		/* r9 is lpsizes[r11] */
	rldicl	r11,r10,0,63		/* r11 = r10 & 0x1 */
6:
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE
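	/*
	 * Each byte of the low/high_slices_psize arrays packs two 4-bit
	 * page size indices, so mask_index * 4 is the shift, 0xf masks
	 * out one nibble, and multiplying by sizeof(struct mmu_psize_def)
	 * turns it into a byte offset into mmu_psize_defs[].
	 */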

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	.Lslb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load
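	/*
	 * An esid of 0x1000 corresponds to an EA of 1TB (0x1000 << 28),
	 * so user addresses at or above 1TB take the 1T-segment path
	 * when the CPU supports it.
	 */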

8:	/* invalid EA - return an error indication */
	crset	4*cr0+eq		/* indicate failure */
	blr

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
.Lslb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,256M)
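	/*
	 * The rldimi above builds the proto-VSID by inserting the context
	 * number above the ESID bits, i.e. (context << ESID_BITS) | esid,
	 * which ASM_VSID_SCRAMBLE then turns into the VSID.
	 */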
	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin.  Previously we tried to find a free
	 * slot first, but that took too long.  Unfortunately we don't
	 * have any LRU information to help us choose a slot.
	 */

	mr	r9,r3

	/* slb_finish_load_1T continues here. r9=EA with non-ESID bits clear */
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
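	/*
	 * The cmpldi immediate above is patched at boot with the actual
	 * number of SLB entries, so the round-robin pointer wraps back
	 * to the first non-bolted slot (SLB_NUM_BOLTED) rather than 0,
	 * leaving the bolted kernel entries untouched.
	 */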

3:
	rldimi	r9,r10,0,36		/* r9  = EA[0:35] | entry */
	oris	r10,r9,SLB_ESID_V@h	/* r10 = r9 | SLB_ESID_V */

	/* r9 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7
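	/*
	 * cr7 still holds the entry-time comparison of the region with
	 * 0xc, so kernel addresses return right here: only user SLB
	 * entries are recorded in the cache below, which switch_slb()
	 * uses to flush them cheaply on context switch.
	 */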

	/* Update the slb cache */
	lhz	r9,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r9,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r9,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r9,r9,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r9,SLB_CACHE_ENTRIES+1
2:
	sth	r9,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
.Lslb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,1T)

	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r9,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b

_ASM_NOKPROBE_SYMBOL(slb_allocate)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap)
#endif
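/*
 * None of the symbols above may be kprobed: they run on the SLB miss
 * path, where a probe trap could itself take an SLB miss before the
 * entry being built is installed.
 */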