/* spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#ifdef CONFIG_SPARC64

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU; that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET 0x0000000000000000 /* All chips */
#define TLB_SFSR 0x0000000000000018 /* All chips */
#define TSB_REG 0x0000000000000028 /* All chips */
#define TLB_TAG_ACCESS 0x0000000000000030 /* All chips */
#define VIRT_WATCHPOINT 0x0000000000000038 /* All chips */
#define PHYS_WATCHPOINT 0x0000000000000040 /* All chips */
#define TSB_EXTENSION_P 0x0000000000000048 /* Ultra-III and later */
#define TSB_EXTENSION_S 0x0000000000000050 /* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N 0x0000000000000058 /* Ultra-III and later */
#define TLB_TAG_ACCESS_EXT 0x0000000000000060 /* Ultra-III+ and later */
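
#ifndef __ASSEMBLY__
/* Illustrative sketch, not part of the original header: each of the
 * registers above has a separate copy per TLB, so the same offset is
 * read through ASI_DMMU for the D-TLB copy and through ASI_IMMU for
 * the I-TLB copy.  The hypothetical helper below mirrors the accessor
 * style used further down in this file.
 */
static inline unsigned long example_read_dmmu_tag_access(void)
{
        unsigned long val;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (val)
                             : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
        return val;
}
#endif /* !(__ASSEMBLY__) */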

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT 0x0000000000000008
#define SECONDARY_CONTEXT 0x0000000000000010
#define DMMU_SFAR 0x0000000000000020
#define VIRT_WATCHPOINT 0x0000000000000038
#define PHYS_WATCHPOINT 0x0000000000000040
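
#ifndef __ASSEMBLY__
/* Illustrative sketch, not part of the original header: the context
 * registers above are written with stxa through ASI_DMMU.  This
 * hypothetical helper follows the same stxa + membar pattern as the
 * accessors below; the kernel's real context-switch code (see
 * asm/mmu_context.h) uses a similar sequence.
 */
static inline void example_write_secondary_context(unsigned long ctx)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (ctx), "r" (SECONDARY_CONTEXT),
                               "i" (ASI_DMMU));
}
#endif /* !(__ASSEMBLY__) */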

#define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1)

#define L1DCACHE_SIZE 0x4000

#define SUN4V_CHIP_INVALID 0x00
#define SUN4V_CHIP_NIAGARA1 0x01
#define SUN4V_CHIP_NIAGARA2 0x02
#define SUN4V_CHIP_NIAGARA3 0x03
#define SUN4V_CHIP_NIAGARA4 0x04
#define SUN4V_CHIP_NIAGARA5 0x05
#define SUN4V_CHIP_SPARC_M6 0x06
#define SUN4V_CHIP_SPARC_M7 0x07
#define SUN4V_CHIP_SPARC64X 0x8a
#define SUN4V_CHIP_SPARC_SN 0x8b
#define SUN4V_CHIP_UNKNOWN 0xff

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
        spitfire = 0,
        cheetah = 1,
        cheetah_plus = 2,
        hypervisor = 3,
};

extern enum ultra_tlb_layout tlb_type;

extern int sun4v_chip_type;

extern int cheetah_pcache_forced_on;
void cheetah_enable_pcache(void);

#define sparc64_highest_locked_tlbent() \
        (tlb_type == spitfire ? \
         SPITFIRE_HIGHEST_LOCKED_TLBENT : \
         CHEETAH_HIGHEST_LOCKED_TLBENT)

extern int num_kernel_image_mappings;

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static inline void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}
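
/* Illustrative sketch, not part of the original header: because the
 * D-cache is write through, the whole L1 D-cache can be invalidated by
 * clearing every tag.  A 32-byte line size is assumed here for
 * spitfire-class chips.
 */
static inline void example_spitfire_flush_whole_dcache(void)
{
        unsigned long addr;

        for (addr = 0; addr < L1DCACHE_SIZE; addr += 32)
                spitfire_put_dcache_tag(addr, 0x0UL);
}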

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed yet for stale instructions to still be in the pipeline;
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static inline void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
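
/* Illustrative sketch, not part of the original header: invalidate one
 * I-cache line and then issue a flush instruction to clear any stale
 * instructions from the pipeline, as the comment above requires.  The
 * use of %g6 as the flush address is only a convention borrowed from
 * other sparc64 kernel code; any address works.
 */
static inline void example_spitfire_invalidate_icache_line(unsigned long addr)
{
        spitfire_put_icache_tag(addr, 0x0UL);
        __asm__ __volatile__("flush %%g6" : : : "memory");
}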

static inline unsigned long spitfire_get_dtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}
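
/* Illustrative sketch, not part of the original header: the spitfire
 * D-TLB is a single 64-entry fully associative array, so walking
 * entries 0..SPITFIRE_HIGHEST_LOCKED_TLBENT covers the whole TLB.  The
 * valid check assumes the sun4u TTE valid bit is bit 63.
 */
static inline int example_spitfire_count_valid_dtlb_entries(void)
{
        int entry, count = 0;

        for (entry = 0; entry <= SPITFIRE_HIGHEST_LOCKED_TLBENT; entry++)
                if (spitfire_get_dtlb_data(entry) & (1UL << 63))
                        count++;
        return count;
}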

static inline unsigned long spitfire_get_dtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
        return tag;
}

static inline void spitfire_put_dtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long spitfire_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static inline unsigned long spitfire_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static inline void spitfire_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

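/* The demap operations below encode the operation in the "address":
 * bits 5:4 select the context (0x20 selects the nucleus context) and
 * bits 7:6 select the demap type (0 = demap page).  This reflects the
 * conventional UltraSPARC demap-address format; the processor manual
 * is authoritative.
 */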
static inline void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static inline void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has "all non-locked" tlb flushes. */
static inline void cheetah_flush_dtlb_all(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static inline void cheetah_flush_itlb_all(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has a 4-TLB layout, so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2-way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2-way
 * associative, and holds 128 entries.
 *
 * Cheetah has a bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 *
 * (An illustrative usage sketch follows cheetah_get_dtlb_tag() below.)
 */
static inline unsigned long cheetah_get_ldtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_litlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_ldtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_TAG_READ));

        return tag;
}

static inline unsigned long cheetah_get_litlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_TAG_READ));

        return tag;
}

static inline void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static inline void cheetah_put_litlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
        return tag;
}
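
/* Illustrative sketch, not part of the original header: walk the large
 * 8K D-TLB described in the layout comment above (512 entries, 2-way)
 * and count valid entries.  A TLB index of 2 is assumed here, mirroring
 * the hard-coded index in the cheetah_get_itlb_*() accessors below; the
 * valid check assumes the sun4u TTE valid bit is bit 63.
 */
static inline int example_cheetah_count_valid_dtlb2_entries(void)
{
        int entry, count = 0;

        for (entry = 0; entry < 512; entry++)
                if (cheetah_get_dtlb_data(entry, 2) & (1UL << 63))
                        count++;
        return count;
}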

static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((tlb << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long cheetah_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static inline void cheetah_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */
#endif /* CONFIG_SPARC64 */
#endif /* !(_SPARC64_SPITFIRE_H) */