1/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
2 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC64_SPITFIRE_H
8#define _SPARC64_SPITFIRE_H
9
10#include <asm/asi.h>
11
/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000 /* All chips */
#define TLB_SFSR		0x0000000000000018 /* All chips */
#define TSB_REG			0x0000000000000028 /* All chips */
#define TLB_TAG_ACCESS		0x0000000000000030 /* All chips */
#define VIRT_WATCHPOINT		0x0000000000000038 /* All chips */
#define PHYS_WATCHPOINT		0x0000000000000040 /* All chips */
#define TSB_EXTENSION_P		0x0000000000000048 /* Ultra-III and later */
#define TSB_EXTENSION_S		0x0000000000000050 /* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N		0x0000000000000058 /* Ultra-III and later */
#define TLB_TAG_ACCESS_EXT	0x0000000000000060 /* Ultra-III+ and later */
26
/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 *
 * NOTE(review): VIRT_WATCHPOINT and PHYS_WATCHPOINT below repeat the
 * definitions in the group above with identical values, so the
 * redefinition is benign; consider dropping one copy.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040
35
/* Index of the last TLB entry usable for locked translations on each
 * chip family (entries are numbered from 0, hence the "- 1").
 */
#define SPITFIRE_HIGHEST_LOCKED_TLBENT	(64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

#define L1DCACHE_SIZE 0x4000	/* 16KB level-1 data cache */
40
41#ifndef __ASSEMBLY__
42
/* The distinct UltraSPARC TLB/MMU implementations this header knows
 * about; the current value is exported as tlb_type below.
 */
enum ultra_tlb_layout {
	spitfire = 0,
	cheetah = 1,
	cheetah_plus = 2,
	hypervisor = 3,
};
49
50extern enum ultra_tlb_layout tlb_type;
51
52extern int cheetah_pcache_forced_on;
53extern void cheetah_enable_pcache(void);
54
/* Highest locked TLB entry index for the TLB layout we are running on:
 * the Spitfire limit for spitfire, the Cheetah limit for everything
 * else.
 */
#define sparc64_highest_locked_tlbent() \
	(tlb_type == spitfire ? \
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)
59
60/* The data cache is write through, so this just invalidates the
61 * specified line.
62 */
63static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
64{
65 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
66 "membar #Sync"
67 : /* No outputs */
68 : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
1da177e4
LT
69}
70
/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline,
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
	/* Diagnostic store to the I-cache tag array, completed by
	 * membar #Sync.
	 */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
84
/* Read the TTE data word of D-TLB entry 'entry' via the
 * ASI_DTLB_DATA_ACCESS diagnostic ASI; entries are 8 bytes apart,
 * hence the entry << 3 addressing.
 */
static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}
98
/* Read the tag of D-TLB entry 'entry' via ASI_DTLB_TAG_READ. */
static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
	return tag;
}
108
/* Write the TTE data word of D-TLB entry 'entry'; the trailing
 * membar #Sync ensures the diagnostic store has completed.
 */
static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_DTLB_DATA_ACCESS));
}
117
/* Read the TTE data word of I-TLB entry 'entry' via
 * ASI_ITLB_DATA_ACCESS (entries are 8 bytes apart, hence entry << 3).
 */
static __inline__ unsigned long spitfire_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}
131
/* Read the tag of I-TLB entry 'entry' via ASI_ITLB_TAG_READ. */
static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
	return tag;
}
141
/* Write the TTE data word of I-TLB entry 'entry'; the trailing
 * membar #Sync ensures the diagnostic store has completed.
 */
static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
150
/* Demap the D-TLB translation for virtual address 'page' in the
 * nucleus context.  NOTE(review): the 0x20 ORed into the demap
 * address encodes the demap-page/nucleus selection -- confirm
 * against the UltraSPARC MMU demap encoding.
 */
static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}
158
/* Demap the I-TLB translation for virtual address 'page' in the
 * nucleus context.  NOTE(review): the 0x20 ORed into the demap
 * address encodes the demap-page/nucleus selection -- confirm
 * against the UltraSPARC MMU demap encoding.
 */
static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}
166
/* Cheetah has "all non-locked" tlb flushes: a single demap with 0x80
 * as the demap address invalidates every non-locked D-TLB entry.
 */
static __inline__ void cheetah_flush_dtlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}
175
/* Cheetah "all non-locked" flush for the I-TLB: a single demap with
 * 0x80 as the demap address invalidates every non-locked entry.
 */
static __inline__ void cheetah_flush_itlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}
183
/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
 * the problem for me. -DaveM
 */
static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{
	unsigned long data;

	/* TLB select 0 (bits 16+) addresses the locked D-TLB; the load
	 * is issued twice to work around the bogus-data bug above.
	 */
	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}
210
/* Read the data word of locked I-TLB entry 'entry' (TLB select 0 in
 * bits 16+); the load is issued twice to work around the Cheetah
 * bogus-data bug described in the layout comment above.
 */
static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}
223
/* Read the tag of locked D-TLB entry 'entry' (TLB select 0 in
 * bits 16+, entry index in bits 3+).
 */
static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_TAG_READ));

	return tag;
}
235
/* Read the tag of locked I-TLB entry 'entry' (TLB select 0 in
 * bits 16+, entry index in bits 3+).
 */
static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_TAG_READ));

	return tag;
}
247
/* Write the data word of locked D-TLB entry 'entry'; membar #Sync
 * completes the diagnostic store.
 */
static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}
257
/* Write the data word of locked I-TLB entry 'entry'; membar #Sync
 * completes the diagnostic store.
 */
static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
267
/* Read the data word of entry 'entry' in D-TLB number 'tlb' (TLB
 * select in bits 16+, entry index in bits 3+); the load is issued
 * twice to work around the Cheetah bogus-data bug described in the
 * layout comment above.
 */
static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}
279
/* Read the tag of entry 'entry' in D-TLB number 'tlb' (TLB select
 * in bits 16+, entry index in bits 3+).
 */
static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
	return tag;
}
289
/* Write the data word of entry 'entry' in D-TLB number 'tlb';
 * membar #Sync completes the diagnostic store.
 */
static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}
299
/* Read the data word of entry 'entry' in the non-locked I-TLB (TLB
 * select 2 in bits 16+); the load is issued twice to work around the
 * Cheetah bogus-data bug described in the layout comment above.
 */
static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}
312
/* Read the tag of entry 'entry' in the non-locked I-TLB (TLB select 2
 * in bits 16+, entry index in bits 3+).
 */
static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
	return tag;
}
322
/* Write the data word of entry 'entry' in the non-locked I-TLB (TLB
 * select 2 in bits 16+); membar #Sync completes the diagnostic store.
 */
static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
331
332#endif /* !(__ASSEMBLY__) */
333
334#endif /* !(_SPARC64_SPITFIRE_H) */