/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *	rf = flags register; the scrambled VSID is inserted into it at
 *		SLB_VSID_SHIFT_##size
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, rf, size)				\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
/*									\
 * Powermac gets an SLB fault before feature fixup, so make the 65-bit	\
 * variant the default part of the feature fixup			\
 */									\
BEGIN_MMU_FTR_SECTION							\
	srdi	rx,rt,VSID_BITS_65_##size;				\
	clrldi	rt,rt,(64-VSID_BITS_65_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_65_##size;				\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_65_##size)); \
MMU_FTR_SECTION_ELSE							\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_##size)); \
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
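/*
 * For reference, a rough C sketch of what ASM_VSID_SCRAMBLE computes for
 * the 256M case (an illustration, not the kernel's implementation; the
 * fold-and-carry below stands in for the modulo because VSID_MODULUS is
 * 2^VSID_BITS - 1):
 *
 *	unsigned long vsid_scramble_256M(unsigned long protovsid)
 *	{
 *		unsigned long v;
 *
 *		v = protovsid * VSID_MULTIPLIER_256M;
 *		v = (v >> VSID_BITS_256M) + (v & VSID_MODULUS_256M);
 *		v += (v + 1) >> VSID_BITS_256M;
 *		return v & VSID_MODULUS_256M;	// == (protovsid * MULTIPLIER) % MODULUS
 *	}
 */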
/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate)
	/*
	 * Check if the address falls within the range of the first context, or
	 * if we may need to handle multi context. For the first context we
	 * allocate the slb entry via the fast path below. For large addresses
	 * we branch out to C-code and see if additional contexts have been
	 * allocated.
	 * The test here is:
	 *	(ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT)
	 */
	rldicr.	r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4)
	bne-	8f
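	/*
	 * Stated in C, the check just made is approximately:
	 *
	 *	if ((ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT))
	 *		return failure;		// the "8:" path below
	 *
	 * The rldicr. rotates the region nibble out of the way and masks so
	 * that any EA bit at or above MAX_EA_BITS_PER_CONTEXT leaves a
	 * non-zero result, making the bne- above fall out to 8:.
	 */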
	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */
	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET

BEGIN_MMU_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T
1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	cmpldi	cr0,r9,0xf		/* vmemmap is region 0xf */
	bne	1f
/* Check virtual memmap region. To be patched at kernel boot */
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	/*
	 * r10 contains the ESID, which is the original faulting EA shifted
	 * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28)
	 * which is 0xd00038000. That can't be used as an immediate, even if we
	 * ignored the 0xd, so we have to load it into a register, and we only
	 * have one register free. So we must load all of (H_VMALLOC_END >> 28)
	 * into a register and compare ESID against that.
	 */
	lis	r11,(H_VMALLOC_END >> 32)@h	// r11 = 0xffffffffd0000000
	ori	r11,r11,(H_VMALLOC_END >> 32)@l	// r11 = 0xffffffffd0003800
	// Rotate left 4, then mask with 0xffffffff0
	rldic	r11,r11,4,28			// r11 = 0xd00038000
	cmpld	r10,r11				// if r10 >= r11
	bge	5f				// goto io_mapping
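	/*
	 * In C terms, the comparison above is roughly:
	 *
	 *	if ((ea >> SID_SHIFT) >= (H_VMALLOC_END >> SID_SHIFT))
	 *		goto io_mapping;	// "5:" below
	 *	// otherwise fall through to the vmalloc encoding
	 */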
	/*
	 * vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines.
	 */
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
6:
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET
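	/*
	 * Worked values for the formula above (straight arithmetic, with
	 * KERNEL_REGION_CONTEXT_OFFSET being 0xc - 1):
	 *	region 0xc (linear mapping) -> kernel context 1
	 *	region 0xd (vmalloc/io)     -> kernel context 2
	 *	region 0xf (vmemmap)        -> kernel context 4
	 */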
BEGIN_MMU_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T
0:
	/*
	 * For userspace addresses, make sure this is region 0.
	 */
	cmpdi	r9,0
	bne-	8f
	/*
	 * For user space, make sure we are within the allowed limit
	 */
	ld	r11,PACA_SLB_ADDR_LIMIT(r13)
	cmpld	r3,r11
	bge-	8f
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page sizes.
	 */
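	/*
	 * A rough C sketch of the lookup below (nibble indexing simplified;
	 * names follow the comments in this file rather than the exact
	 * kernel structures):
	 *
	 *	if (ea < SLICE_LOW_TOP)
	 *		psizes = get_paca()->context.low_slices_psize;
	 *	else
	 *		psizes = get_paca()->context.high_slices_psize;
	 *	psize = (psizes[index] >> (mask_index * 4)) & 0xf;
	 *	r11 = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */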
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 contains the esid */
	cmpldi	r10,16			/* SLICE_LOW_TOP >> SID_SHIFT */
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f
5:
	/*
	 * r9 is get_paca()->context.low_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,1		/* index */
	addi	r9,r11,PACALOWSLICESPSIZE
	lbzx	r9,r13,r9		/* r9 is lpsizes[r11] */
	rldicl	r11,r10,0,63		/* r11 = r10 & 0x1 */
6:
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */
	ld	r9,PACACONTEXTID(r13)
BEGIN_MMU_FTR_SECTION
	cmpldi	r10,0x1000		/* 1T boundary, in 256MB ESID units */
	bge	.Lslb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load

8:	/* invalid EA - return an error indication */
	crset	4*cr0+eq		/* indicate failure */
	blr
/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
.Lslb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,256M)
	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
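	/*
	 * The slot selection below, roughly in C (mmu_slb_size is the value
	 * the soft-patched cmpldi compares against):
	 *
	 *	entry = paca->stab_rr + 1;
	 *	if (entry >= mmu_slb_size)
	 *		entry = SLB_NUM_BOLTED;	// wrap, skipping bolted slots
	 *	paca->stab_rr = entry;
	 */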
	mr	r9,r3

	/* slb_finish_load_1T continues here. r9=EA with non-ESID bits clear */
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0			/* patched with mmu_slb_size at boot */
	blt+	4f
	li	r10,SLB_NUM_BOLTED
4:
	std	r10,PACASTABRR(r13)
	rldimi	r9,r10,0,36		/* r9  = EA[0:35] | entry */
	oris	r10,r9,SLB_ESID_V@h	/* r10 = r9 | SLB_ESID_V */

	/* r9 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7
	/* Update the slb cache */
	lhz	r9,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r9,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r9,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r9,r9,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r9,SLB_CACHE_ENTRIES+1
2:
	sth	r9,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr
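	/*
	 * The cache update above, roughly in C (esid is the 36-bit ESID
	 * stored as a u32, as in the stw):
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[paca->slb_cache_ptr++] = esid;
	 *	else
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 */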
/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
.Lslb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,1T)

	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
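	/*
	 * At this point the VSID word is, in effect:
	 *
	 *	vsid_data = flags | (vsid_1T << SLB_VSID_SHIFT_1T)
	 *			  | (MMU_SEGSIZE_1T << SLB_VSID_SSIZE_SHIFT);
	 *
	 * (the scramble macro inserted the VSID, the rldimi above inserted
	 * the segment-size field).
	 */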
	/* r3 = EA, r11 = VSID data */
	clrrdi	r9,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b
_ASM_NOKPROBE_SYMBOL(slb_allocate)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap)
#endif