/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/memblock.h>

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/prom.h>

struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

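/*
 * Per-CPU cache of the user segment table entries installed since the
 * last context switch.  __ste_allocate() records the index of each user
 * STE it creates so that switch_stab() can invalidate just those entries;
 * once more than NR_STAB_CACHE_ENTRIES have been installed, the cache is
 * marked overflowed and switch_stab() scans the whole table instead.
 */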
#define NR_STAB_CACHE_ENTRIES 8
static DEFINE_PER_CPU(long, stab_cache_ptr);
static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;

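	/*
	 * A note on the layout implied by the shifts below: the table is
	 * one hardware page of 16-byte STEs, arranged as 32 groups of 8
	 * entries.  The primary group for an ESID is selected by its low
	 * five bits and the secondary group by their complement; "<< 7"
	 * turns a group number into a byte offset (8 entries * 16 bytes)
	 * and "<< 3" turns it into a global entry index.
	 */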
	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
				eieio();
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry; pick one with a round-robin
	 * selection.  Search all entries in the two groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment. */
		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */

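	/*
	 * Update protocol, as implemented by the barrier sequence below:
	 * clear the old ESID word so the entry is no longer valid, sync so
	 * the invalidation is visible before the VSID changes, write the
	 * new VSID, eieio so the VSID is visible before the valid bit is
	 * set, then write the new ESID word.  The final slbie evicts any
	 * stale translation for the old ESID.
	 */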
	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->vsid_data = vsid_data;
	eieio();				/* Order update */
	castout_ste->esid_data = esid_data;

	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm.
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (is_kernel_addr(ea)) {
		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	} else {
		if ((ea >= TASK_SIZE_USER64) || (!mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

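	/*
	 * Record user entries in the per-CPU cache so the next context
	 * switch can invalidate them individually; setting the pointer to
	 * NR_STAB_CACHE_ENTRIES+1 marks the cache as overflowed.
	 */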
	if (!is_kernel_addr(ea)) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}

int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause a STAB
	 * miss, which would update the stab_cache/stab_cache_ptr per-cpu
	 * variables.
	 */
	hard_irq_disable();

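	/*
	 * Flush the user entries: if the per-CPU cache did not overflow,
	 * invalidate only the entries recorded in it; otherwise scan the
	 * whole table, sparing entry 0, which holds the bolted kernel
	 * segment.
	 */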
	offset = __get_cpu_var(stab_cache_ptr);
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;
			ea = ste->esid_data & ESID_MASK;
			if (!is_kernel_addr(ea)) {
				ste->esid_data = 0;
			}
		}
	}
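
	/*
	 * slbia drops any copies of the invalidated entries that the
	 * hardware may still hold in the segment lookaside buffer; the
	 * surrounding syncs order it against the table updates above.
	 */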
	asm volatile("sync; slbia; sync":::"memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;
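
	/*
	 * Preload the program counter, stack and mmap/heap base segments;
	 * the GET_ESID() comparisons skip duplicates, since addresses in
	 * the same 256MB segment share a single entry.
	 */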
	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs.  These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void __init stabs_alloc(void)
{
	int cpu;

	if (mmu_has_feature(MMU_FTR_SLB))
		return;

	for_each_possible_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue; /* stab for CPU 0 is statically allocated */

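		/*
		 * Allocate one hardware page, aligned to a hardware page,
		 * below 1 << SID_SHIFT (256MB) so that the new table lands
		 * inside the first, bolted segment as required above.
		 */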
		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
					      1<<SID_SHIFT);
		newstab = (unsigned long)__va(newstab);

		memset((void *)newstab, 0, HW_PAGE_SIZE);

		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = __pa(newstab);
		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
		       "virtual, 0x%llx absolute\n",
		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
	}
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
	unsigned long stabreal;

	asm volatile("isync; slbia; isync":::"memory");
	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

	/* Order update */
	asm volatile("sync":::"memory");

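	/*
	 * The ASR (Address Space Register) holds the real address of the
	 * segment table; the low bit set below is the valid bit that
	 * enables segment table lookups.
	 */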
	/* Set ASR */
	stabreal = get_paca()->stab_real | 0x1ul;

	mtspr(SPRN_ASR, stabreal);
}