powerpc: Fix some missed ppc32 mm->context.id conversions
[linux-2.6-block.git] / arch / powerpc / mm / slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

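/*
 * Note: slbmte takes the entry as two doublewords: the VSID word built by
 * mk_vsid_data() (VSID plus protection and page-size/LLP flags) and the
 * ESID word built by mk_esid_data() (ESID, valid bit and slot index).
 */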
static inline void create_slbe(unsigned long ea, unsigned long flags,
                               unsigned long entry)
{
        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
                       "r" (mk_esid_data(ea, entry))
                     : "memory" );
}

static void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, virtual_llp, lflags, vflags;
        unsigned long ksp_esid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | virtual_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
        if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                ksp_esid_data &= ~SLB_ESID_V;

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
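        /* slbia leaves SLB entry 0 (the PAGE_OFFSET linear mapping) intact,
         * so only the vmalloc and kernel stack entries are recreated below;
         * when the stack sits in that first linear segment, the valid bit
         * was cleared above so slot 2 is written as an invalid entry rather
         * than a duplicate of slot 0. */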
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, 1)),
                        "r"(mk_vsid_data(ksp_esid_data, lflags)),
                        "r"(ksp_esid_data)
                     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long esid_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

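        /* The SLB miss handler records the ESIDs of the user entries it
         * creates in paca->slb_cache.  If that cache overflowed
         * (slb_cache_ptr > SLB_CACHE_ENTRIES), the user entries can't be
         * invalidated one by one, so fall back to a full flush and rebolt. */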
        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        esid_data = ((unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT) | SLBIE_C;
                        asm volatile("slbie %0" : : "r" (esid_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue: issue one extra slbie, either
         * repeating the last invalidation above or (after a full flush)
         * targeting ESID 0, which slbia has already covered. */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (esid_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

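        /* Touch the segments the task will most likely use as soon as it
         * returns to userspace - its text (pc), its stack, and the region
         * around TASK_UNMAPPED_BASE - skipping kernel addresses and
         * duplicates, so it doesn't take SLB misses on them right away. */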
        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}

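/*
 * The slb_miss_*_load_* symbols referenced below are instructions in the
 * low-level SLB miss handler (slb_low.S); their immediate fields are
 * patched once at boot with the VSID flag/LLP encodings for the page
 * sizes in use, so the handler doesn't have to look them up on each miss.
 */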
void slb_initialize(void)
{
        unsigned long linear_llp, virtual_llp;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_virtual;
        extern unsigned int *slb_miss_user_load_normal;
#ifdef CONFIG_HUGETLB_PAGE
        extern unsigned int *slb_miss_user_load_huge;
        unsigned long huge_llp;

        huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_virtual,
                                   SLB_VSID_KERNEL | virtual_llp);
                patch_slb_encoding(slb_miss_user_load_normal,
                                   SLB_VSID_USER | virtual_llp);

                DBG("SLB: linear LLP = %04x\n", linear_llp);
                DBG("SLB: virtual LLP = %04x\n", virtual_llp);
#ifdef CONFIG_HUGETLB_PAGE
                patch_slb_encoding(slb_miss_user_load_huge,
                                   SLB_VSID_USER | huge_llp);
                DBG("SLB: huge LLP = %04x\n", huge_llp);
#endif
        }

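        /* Bolted entries: slot 0 covers the kernel linear mapping at
         * PAGE_OFFSET, slot 1 the start of the vmalloc region, and slot 2
         * is reserved for the kernel stack.  These are the SLB_NUM_BOLTED
         * entries that slb_flush_and_rebolt() preserves or recreates. */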
        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
 {
        unsigned long lflags, vflags;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | virtual_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_slbe(PAGE_OFFSET, lflags, 0);

        /* VMALLOC space has 4K pages always for now */
        create_slbe(VMALLOC_START, vflags, 1);

        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment. By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
 }
#endif /* CONFIG_PPC_ISERIES */

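        /* stab_rr is the round-robin pointer the SLB miss handler uses to
         * choose a victim slot; starting it past the bolted entries keeps
         * those from being cast out. */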
        get_paca()->stab_rr = SLB_NUM_BOLTED;
}