arch/mips/kernel/mips-mt.c

/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/security.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

int vpelimit;

static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);

	return 1;
}

__setup("maxvpes=", maxvpes);

int tclimit;

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);

	return 1;
}

__setup("maxtcs=", maxtcs);

/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value,
 * typically the value returned by an earlier dvpe().
 */

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk(" MVPControl Passed: %08lx\n", mvpctl);
	printk(" MVPControl Read: %08lx\n", vpflags);
	printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk(" VPE %d\n", i);
				printk(" VPEControl : %08lx\n",
					read_vpe_c0_vpecontrol());
				printk(" VPEConf0 : %08lx\n",
					read_vpe_c0_vpeconf0());
				printk(" VPE%d.Status : %08lx\n",
					i, read_vpe_c0_status());
				printk(" VPE%d.EPC : %08lx %pS\n",
					i, read_vpe_c0_epc(),
					(void *) read_vpe_c0_epc());
				printk(" VPE%d.Cause : %08lx\n",
					i, read_vpe_c0_cause());
				printk(" VPE%d.Config7 : %08lx\n",
					i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for (tc = 0; tc < ntc; tc++) {
		settc(tc);
		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourselves? */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk(" TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk(" TC %d\n", tc);
		}
		printk(" TCStatus : %08lx\n", tcstatval);
		printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
		printk(" TCRestart : %08lx %pS\n",
			read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
		printk(" TCHalt : %08lx\n", haltval);
		printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}

static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;

static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;
}
__setup("norps", rps_disable);

static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;
}
__setup("rpsctl=", rpsctl_set);

static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;
}
__setup("nblsu=", nblsu_set);

static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;
}
__setup("config7=", config7_set);

/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush;
int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;

static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;
}
__setup("protiflush", set_protiflush);

static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;
}
__setup("protdflush", set_protdflush);

static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;
}
__setup("niflush=", niflush);

static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;
}
__setup("ndflush=", ndflush);

static unsigned int itc_base;

static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;
}

__setup("itcbase=", set_itc_base);

void mips_mt_set_cpuoptions(void)
{
	unsigned int oconfig7 = read_c0_config7();
	unsigned int nconfig7 = oconfig7;

	if (mt_opt_norps) {
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	}
	if (mt_opt_rpsctl >= 0) {
		printk("34K return prediction stack override set to %d.\n",
			mt_opt_rpsctl);
		if (mt_opt_rpsctl)
			nconfig7 |= (1 << 2);
		else
			nconfig7 &= ~(1 << 2);
	}
	if (mt_opt_nblsu >= 0) {
		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
		if (mt_opt_nblsu)
			nconfig7 |= (1 << 5);
		else
			nconfig7 &= ~(1 << 5);
	}
	if (mt_opt_forceconfig7) {
		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
		nconfig7 = mt_opt_config7;
	}
	if (oconfig7 != nconfig7) {
		__asm__ __volatile("sync");
		write_c0_config7(nconfig7);
		ehb();
		printk("Config7: 0x%08x\n", read_c0_config7());
	}

	/* Report Cache management debug options */
	if (mt_protiflush)
		printk("I-cache flushes single-threaded\n");
	if (mt_protdflush)
		printk("D-cache flushes single-threaded\n");
	if (mt_n_iflushes != 1)
		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
	if (mt_n_dflushes != 1)
		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);

	if (itc_base != 0) {
		/*
		 * Configure ITC mapping. This code is very
		 * specific to the 34K core family, which uses
		 * a special mode bit ("ITC") in the ErrCtl
		 * register to enable access to ITC control
		 * registers via cache "tag" operations.
		 */
		unsigned long ectlval;
		unsigned long itcblkgrn;

		/* ErrCtl register is known as "ecc" to Linux */
		ectlval = read_c0_ecc();
		write_c0_ecc(ectlval | (0x1 << 26));
		ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
		/* Read "cache tag" for Dcache pseudo-index 8 */
		cache_op(Index_Load_Tag_D, INDEX_8);
		ehb();
		itcblkgrn = read_c0_dtaglo();
		itcblkgrn &= 0xfffe0000;
		/* Set for 128 byte pitch of ITC cells */
		itcblkgrn |= 0x00000c00;
		/* Stage in Tag register */
		write_c0_dtaglo(itcblkgrn);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_8);
		/* Now set base address, and turn ITC on with 0x1 bit */
		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_0);
		write_c0_ecc(ectlval);
		ehb();
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
}
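
/*
 * mips_mt_set_cpuoptions() is presumably invoked once during early MT/SMP
 * bring-up, before additional VPEs and TCs are started; the call site is in
 * platform setup code outside this file.
 */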

/*
 * Functions to protect cache flushes from concurrent execution; the
 * implementation depends on the MP software model chosen.
 */

void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

struct class *mt_class;

static int __init mt_init(void)
{
	struct class *mtc;

	mtc = class_create(THIS_MODULE, "mt");
	if (IS_ERR(mtc))
		return PTR_ERR(mtc);

	mt_class = mtc;

	return 0;
}

subsys_initcall(mt_init);
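
/*
 * The "mt" class is kept in the global mt_class pointer so that other MIPS
 * MT drivers (for example the VPE loader and RTLX code) can create their
 * device nodes under it; those users live outside this file.
 */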