[POWERPC] Enable NO_HZ and high res timers for pseries and ppc64 configs
[linux-2.6-block.git] / arch / powerpc / mm / slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}
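/*
 * Note: slb_allocate_realmode() is, as we read it, written to be safe to run
 * with translation disabled (it only touches data reachable without taking a
 * further SLB fault), which is what lets the same routine serve both the SLB
 * miss handler and C callers such as switch_slb() below.  Both routines
 * declared above are presumably implemented in the SLB miss assembly, hence
 * the bare extern prototypes.
 */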
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         unsigned long slot)
{
        unsigned long mask;

        mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
        return (ea & mask) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)	\
        ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}
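/*
 * Note on the helpers above (symbolic sketch, no new values assumed): an SLB
 * entry is written as two words.  mk_esid_data() builds the ESID word --
 * (ea & ESID_MASK) | SLB_ESID_V | slot -- holding the effective segment ID,
 * the valid bit and the entry index; mk_vsid_data() builds the matching VSID
 * word -- the VSID shifted into place plus the protection/page-size flags and
 * the segment-size field.  create_shadowed_slbe() below feeds them to slbmte
 * as the RS (VSID) and RB (ESID) operands respectively.  For example, the
 * bolted linear-mapping entry is mk_esid_data(PAGE_OFFSET, mmu_kernel_ssize, 0)
 * paired with mk_vsid_data(PAGE_OFFSET, mmu_kernel_ssize, lflags), exactly as
 * slb_initialize() sets it up.
 */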
static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}
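/*
 * Note: the SLB shadow buffer is, as far as we understand it, a per-CPU save
 * area visible to the hypervisor on pSeries (presumably registered with PHYP
 * at boot).  Mirroring the bolted entries there lets firmware re-establish
 * them if it has to invalidate the SLB, e.g. while the partition is
 * preempted, which is why every bolted-entry update goes through
 * slb_shadow_update()/slb_shadow_clear() as well as slbmte.
 */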
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory" );
}
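/*
 * Note: the same mk_vsid_data()/mk_esid_data() values feed both the shadow
 * save area (via slb_shadow_update() above) and the slbmte that installs the
 * entry in hardware, so the hypervisor's view and the hardware SLB stay
 * identical for each bolted slot.
 */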
void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}
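/*
 * Note on the bolted layout assumed above (matching SLB_NUM_BOLTED, which is
 * 3 in this tree as far as we can tell): slot 0 holds the kernel linear
 * mapping, slot 1 the vmalloc region and slot 2 the current kernel stack.
 * slbia preserves entry 0 (which is why slb_initialize() below clears it
 * explicitly with slbmte), so only slots 1 and 2 need re-writing here.  The
 * ksp_esid_data test catches the case where the stack still lives in the
 * bolted linear segment; slot 2 is then left invalid rather than duplicating
 * an existing translation.
 */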
void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}
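/*
 * Note (assumption about callers, not visible in this file): this is expected
 * to be called when mmu_vmalloc_psize changes -- e.g. if the vmalloc region
 * has to be demoted to a smaller page size -- so that bolted slot 1 is
 * rebuilt with the new SLB_VSID flags on this CPU.
 */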
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}
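/*
 * Note on the scheme above (a reading of this code, not new behaviour): the
 * SLB miss handler is assumed to record the segment of each user entry it
 * installs in get_paca()->slb_cache[], bumping slb_cache_ptr.  On a context
 * switch we can then slbie just those entries; if the cache overflowed
 * (offset > SLB_CACHE_ENTRIES) we fall back to a full flush-and-rebolt.  The
 * preloads of pc, stack and unmapped_base at the end simply avoid taking
 * three immediate SLB misses once the new task returns to userspace.
 */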
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr, 4+
                           (unsigned long)insn_addr);
}
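/*
 * Note: the instructions patched here (slb_miss_kernel_load_linear and
 * slb_miss_kernel_load_io, used by slb_initialize() below) are presumably
 * load-immediate instructions in the SLB miss assembly whose immediate field
 * is zero, so OR-ing the SLB_VSID_KERNEL | llp encoding into the instruction
 * word is sufficient -- provided the encoding fits in the 16-bit immediate
 * field.  The flush_icache_range() makes the patched word visible to
 * instruction fetch.
 */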
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);

                DBG("SLB: linear LLP = %04x\n", linear_llp);
                DBG("SLB: io LLP = %04x\n", io_llp);
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment.  By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
}