Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * PowerPC64 SLB support. | |
3 | * | |
4 | * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM | |
5 | * Based on earlier code written by: | |
6 | * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com | |
7 | * Copyright (c) 2001 Dave Engebretsen | |
8 | * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM | |
9 | * | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or | |
12 | * modify it under the terms of the GNU General Public License | |
13 | * as published by the Free Software Foundation; either version | |
14 | * 2 of the License, or (at your option) any later version. | |
15 | */ | |
16 | ||
3c726f8d BH |
17 | #undef DEBUG |
18 | ||
1da177e4 LT |
19 | #include <asm/pgtable.h> |
20 | #include <asm/mmu.h> | |
21 | #include <asm/mmu_context.h> | |
22 | #include <asm/paca.h> | |
23 | #include <asm/cputable.h> | |
3c726f8d | 24 | #include <asm/cacheflush.h> |
2f6093c8 MN |
25 | #include <asm/smp.h> |
26 | #include <linux/compiler.h> | |
3c726f8d BH |
27 | |
28 | #ifdef DEBUG | |
29 | #define DBG(fmt...) udbg_printf(fmt) | |
30 | #else | |
31 | #define DBG(fmt...) | |
32 | #endif | |
1da177e4 | 33 | |
3c726f8d BH |
34 | extern void slb_allocate_realmode(unsigned long ea); |
35 | extern void slb_allocate_user(unsigned long ea); | |
36 | ||
37 | static void slb_allocate(unsigned long ea) | |
38 | { | |
39 | /* Currently, we do real mode for all SLBs including user, but | |
40 | * that will change if we bring back dynamic VSIDs | |
41 | */ | |
42 | slb_allocate_realmode(ea); | |
43 | } | |
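| /* slb_allocate_realmode() and slb_allocate_user() are assembler |
| * routines in slb_low.S that compute and install an SLB entry for |
| * the faulting effective address. */ |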
1da177e4 LT |
44 | |
45 | static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot) | |
46 | { | |
47 | return (ea & ESID_MASK) | SLB_ESID_V | slot; | |
48 | } | |
49 | ||
50 | static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags) | |
51 | { | |
52 | return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags; | |
53 | } | |
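| /* These two helpers build the operands of the slbmte instruction: |
| * mk_esid_data() yields the RB value (ESID, valid bit and entry |
| * index) and mk_vsid_data() the RS value (VSID shifted into place |
| * plus protection and page-size flags). */ |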
54 | ||
2f6093c8 MN |
55 | static inline void slb_shadow_update(unsigned long esid, unsigned long vsid, |
56 | unsigned long entry) | |
1da177e4 | 57 | { |
2f6093c8 MN |
58 | /* |
59 | * Clear the ESID first so the entry is not valid while we are | |
60 | * updating it. | |
61 | */ | |
62 | get_slb_shadow()->save_area[entry].esid = 0; | |
63 | barrier(); | |
64 | get_slb_shadow()->save_area[entry].vsid = vsid; | |
65 | barrier(); | |
66 | get_slb_shadow()->save_area[entry].esid = esid; | |
67 | ||
68 | } | |
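| /* The shadow save area is read by the hypervisor to recreate the |
| * bolted SLB entries, e.g. after the partition has been preempted, |
| * which is why the entry is marked invalid (esid = 0) while its |
| * vsid is being changed. */ |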
69 | ||
70 | static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags, | |
71 | unsigned long entry) | |
72 | { | |
73 | /* | |
74 | * Updating the shadow buffer before writing the SLB ensures | |
75 | * we don't get a stale entry here if we get preempted by PHYP | |
76 | * between these two statements. | |
77 | */ | |
78 | slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags), | |
79 | entry); | |
80 | ||
1da177e4 LT |
81 | asm volatile("slbmte %0,%1" : |
82 | : "r" (mk_vsid_data(ea, flags)), | |
83 | "r" (mk_esid_data(ea, entry)) | |
84 | : "memory" ); | |
85 | } | |
86 | ||
bf72aeba | 87 | void slb_flush_and_rebolt(void) |
1da177e4 LT |
88 | { |
89 | /* If you change this, make sure you change SLB_NUM_BOLTED
90 | * appropriately too. */ | |
bf72aeba | 91 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; |
1da177e4 LT |
92 | unsigned long ksp_esid_data; |
93 | ||
94 | WARN_ON(!irqs_disabled()); | |
95 | ||
3c726f8d | 96 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; |
bf72aeba | 97 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; |
3c726f8d | 98 | lflags = SLB_VSID_KERNEL | linear_llp; |
bf72aeba | 99 | vflags = SLB_VSID_KERNEL | vmalloc_llp; |
1da177e4 LT |
100 | |
101 | ksp_esid_data = mk_esid_data(get_paca()->kstack, 2); | |
b5666f70 | 102 | if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) |
1da177e4 LT |
103 | ksp_esid_data &= ~SLB_ESID_V; |
104 | ||
2f6093c8 MN |
105 | /* Only the third entry (the kernel stack) can change here, so only resave that one */
106 | slb_shadow_update(ksp_esid_data, | |
107 | mk_vsid_data(ksp_esid_data, lflags), 2); | |
108 | ||
1da177e4 LT |
109 | /* We need to do this all in asm, so we're sure we don't touch |
110 | * the stack between the slbia and rebolting it. */ | |
111 | asm volatile("isync\n" | |
112 | "slbia\n" | |
113 | /* Slot 1 - first VMALLOC segment */ | |
114 | "slbmte %0,%1\n" | |
115 | /* Slot 2 - kernel stack */ | |
116 | "slbmte %2,%3\n" | |
117 | "isync" | |
14c89e7f DG |
118 | :: "r"(mk_vsid_data(VMALLOC_START, vflags)), |
119 | "r"(mk_esid_data(VMALLOC_START, 1)), | |
3c726f8d | 120 | "r"(mk_vsid_data(ksp_esid_data, lflags)), |
1da177e4 LT |
121 | "r"(ksp_esid_data) |
122 | : "memory"); | |
123 | } | |
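| /* slbia does not touch SLB entry 0, so the bolted kernel-linear |
| * segment at PAGE_OFFSET (slot 0) survives the flush; only the |
| * vmalloc and kernel-stack slots need to be re-entered above. */ |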
124 | ||
125 | /* Flush all user entries from the segment table of the current processor. */ | |
126 | void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |
127 | { | |
128 | unsigned long offset = get_paca()->slb_cache_ptr; | |
129 | unsigned long esid_data = 0; | |
130 | unsigned long pc = KSTK_EIP(tsk); | |
131 | unsigned long stack = KSTK_ESP(tsk); | |
132 | unsigned long unmapped_base; | |
133 | ||
134 | if (offset <= SLB_CACHE_ENTRIES) { | |
135 | int i; | |
136 | asm volatile("isync" : : : "memory"); | |
137 | for (i = 0; i < offset; i++) { | |
14b34661 DG |
138 | esid_data = ((unsigned long)get_paca()->slb_cache[i] |
139 | << SID_SHIFT) | SLBIE_C; | |
1da177e4 LT |
140 | asm volatile("slbie %0" : : "r" (esid_data)); |
141 | } | |
142 | asm volatile("isync" : : : "memory"); | |
143 | } else { | |
144 | slb_flush_and_rebolt(); | |
145 | } | |
146 | ||
147 | /* Workaround POWER5 < DD2.1 erratum: repeat the last slbie; if we flushed with slbia above, esid_data is still 0, so this becomes an slbie of ESID 0 */
148 | if (offset == 1 || offset > SLB_CACHE_ENTRIES) | |
149 | asm volatile("slbie %0" : : "r" (esid_data)); | |
150 | ||
151 | get_paca()->slb_cache_ptr = 0; | |
152 | get_paca()->context = mm->context; | |
153 | ||
154 | /* | |
155 | * preload some userspace segments into the SLB. | |
156 | */ | |
157 | if (test_tsk_thread_flag(tsk, TIF_32BIT)) | |
158 | unmapped_base = TASK_UNMAPPED_BASE_USER32; | |
159 | else | |
160 | unmapped_base = TASK_UNMAPPED_BASE_USER64; | |
161 | ||
51fae6de | 162 | if (is_kernel_addr(pc)) |
1da177e4 LT |
163 | return; |
164 | slb_allocate(pc); | |
165 | ||
166 | if (GET_ESID(pc) == GET_ESID(stack)) | |
167 | return; | |
168 | ||
51fae6de | 169 | if (is_kernel_addr(stack)) |
1da177e4 LT |
170 | return; |
171 | slb_allocate(stack); | |
172 | ||
173 | if ((GET_ESID(pc) == GET_ESID(unmapped_base)) | |
174 | || (GET_ESID(stack) == GET_ESID(unmapped_base))) | |
175 | return; | |
176 | ||
51fae6de | 177 | if (is_kernel_addr(unmapped_base)) |
1da177e4 LT |
178 | return; |
179 | slb_allocate(unmapped_base); | |
180 | } | |
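| /* The GET_ESID() comparisons above prevent entering the same |
| * segment twice: duplicate SLB entries are architecturally illegal |
| * and can raise a machine check. */ |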
181 | ||
3c726f8d BH |
182 | static inline void patch_slb_encoding(unsigned int *insn_addr, |
183 | unsigned int immed) | |
184 | { | |
185 | /* Assume the instruction had a "0" immediate value, just | |
186 | * "or" in the new value | |
187 | */ | |
188 | *insn_addr |= immed; | |
189 | flush_icache_range((unsigned long)insn_addr, |
190 | (unsigned long)insn_addr + 4); |
191 | } | |
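| /* The insn_addr arguments come from _GLOBAL() labels in slb_low.S. |
| * With 64-bit function descriptors, such a symbol names an .opd |
| * entry whose first doubleword is the code address, so declaring it |
| * as "unsigned int *" (as slb_initialize() does) dereferences the |
| * descriptor and yields the instruction to patch. */ |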
192 | ||
1da177e4 LT |
193 | void slb_initialize(void) |
194 | { | |
bf72aeba | 195 | unsigned long linear_llp, vmalloc_llp, io_llp; |
3c726f8d BH |
196 | static int slb_encoding_inited; |
197 | extern unsigned int *slb_miss_kernel_load_linear; | |
bf72aeba | 198 | extern unsigned int *slb_miss_kernel_load_io; |
3c726f8d BH |
199 | #ifdef CONFIG_HUGETLB_PAGE |
200 | extern unsigned int *slb_miss_user_load_huge; | |
201 | unsigned long huge_llp; | |
202 | ||
203 | huge_llp = mmu_psize_defs[mmu_huge_psize].sllp; | |
204 | #endif | |
205 | ||
206 | /* Prepare our SLB miss handler based on our page size */ | |
207 | linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; | |
bf72aeba PM |
208 | io_llp = mmu_psize_defs[mmu_io_psize].sllp; |
209 | vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; | |
210 | get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp; | |
211 | ||
3c726f8d BH |
212 | if (!slb_encoding_inited) { |
213 | slb_encoding_inited = 1; | |
214 | patch_slb_encoding(slb_miss_kernel_load_linear, | |
215 | SLB_VSID_KERNEL | linear_llp); | |
bf72aeba PM |
216 | patch_slb_encoding(slb_miss_kernel_load_io, |
217 | SLB_VSID_KERNEL | io_llp); | |
3c726f8d BH |
218 | |
219 | DBG("SLB: linear LLP = %04x\n", linear_llp); | |
bf72aeba | 220 | DBG("SLB: io LLP = %04x\n", io_llp); |
3c726f8d BH |
221 | #ifdef CONFIG_HUGETLB_PAGE |
222 | patch_slb_encoding(slb_miss_user_load_huge, | |
223 | SLB_VSID_USER | huge_llp); | |
224 | DBG("SLB: huge LLP = %04x\n", huge_llp); | |
225 | #endif | |
226 | } | |
227 | ||
1da177e4 LT |
228 | /* On iSeries the bolted entries have already been set up by |
229 | * the hypervisor from the lparMap data in head.S */ | |
230 | #ifndef CONFIG_PPC_ISERIES | |
3c726f8d BH |
231 | { |
232 | unsigned long lflags, vflags; | |
1da177e4 | 233 | |
3c726f8d | 234 | lflags = SLB_VSID_KERNEL | linear_llp; |
bf72aeba | 235 | vflags = SLB_VSID_KERNEL | vmalloc_llp; |
1da177e4 | 236 | |
3c726f8d BH |
237 | /* Invalidate the entire SLB (even slot 0) & all the ERATs */
238 | asm volatile("isync":::"memory"); | |
239 | asm volatile("slbmte %0,%0"::"r" (0) : "memory"); | |
1da177e4 | 240 | asm volatile("isync; slbia; isync":::"memory"); |
2f6093c8 | 241 | create_shadowed_slbe(PAGE_OFFSET, lflags, 0); |
3c726f8d | 242 | |
2f6093c8 | 243 | create_shadowed_slbe(VMALLOC_START, vflags, 1); |
3c726f8d | 244 | |
1da177e4 LT |
245 | /* We don't bolt the stack for the time being - we're in boot, |
246 | * so the stack is in the bolted segment. By the time it goes | |
247 | * elsewhere, we'll call _switch() which will bolt in the new | |
248 | * one. */ | |
249 | asm volatile("isync":::"memory"); | |
3c726f8d BH |
250 | } |
251 | #endif /* CONFIG_PPC_ISERIES */ | |
1da177e4 LT |
252 | |
253 | get_paca()->stab_rr = SLB_NUM_BOLTED; | |
254 | } |
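| /* Seeding stab_rr with SLB_NUM_BOLTED makes the round-robin |
| * replacement in the SLB miss handler cycle through the non-bolted |
| * slots only, so entries 0-2 are never cast out. */ |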